File: vbdev_compress.c
Warning: line 283, column 2: Value stored to 'num_blocks' is never read
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright (C) 2018 Intel Corporation. |
3 | * All rights reserved. |
4 | * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. |
5 | */ |
6 | |
7 | #include "vbdev_compress.h" |
8 | |
9 | #include "spdk/reduce.h" |
10 | #include "spdk/stdinc.h" |
11 | #include "spdk/rpc.h" |
12 | #include "spdk/env.h" |
13 | #include "spdk/endian.h" |
14 | #include "spdk/string.h" |
15 | #include "spdk/thread.h" |
16 | #include "spdk/util.h" |
17 | #include "spdk/bdev_module.h" |
18 | #include "spdk/likely.h" |
19 | #include "spdk/log.h" |
20 | #include "spdk/accel.h" |
21 | |
22 | #include "spdk/accel_module.h" |
23 | |
24 | #define CHUNK_SIZE (1024 * 16) |
25 | #define COMP_BDEV_NAME "compress" |
26 | #define BACKING_IO_SZ (4 * 1024) |
27 | |
28 | /* This namespace UUID was generated using uuid_generate() method. */ |
29 | #define BDEV_COMPRESS_NAMESPACE_UUID "c3fad6da-832f-4cc0-9cdc-5c552b225e7b" |
30 | |
31 | struct vbdev_comp_delete_ctx { |
32 | spdk_delete_compress_complete cb_fn; |
33 | void *cb_arg; |
34 | int cb_rc; |
35 | struct spdk_thread *orig_thread; |
36 | }; |
37 | |
38 | /* List of virtual bdevs and associated info for each. */ |
39 | struct vbdev_compress { |
40 | struct spdk_bdev *base_bdev; /* the thing we're attaching to */ |
41 | struct spdk_bdev_desc *base_desc; /* its descriptor we get from open */ |
42 | struct spdk_io_channel *base_ch; /* IO channel of base device */ |
43 | struct spdk_bdev comp_bdev; /* the compression virtual bdev */ |
44 | struct comp_io_channel *comp_ch; /* channel associated with this bdev */ |
45 | struct spdk_io_channel *accel_channel; /* to communicate with the accel framework */ |
46 | struct spdk_thread *reduce_thread; |
47 | pthread_mutex_t reduce_lock; |
48 | uint32_t ch_count; |
49 | TAILQ_HEAD(, spdk_bdev_io) pending_comp_ios; /* outstanding operations to a comp library */ |
50 | struct spdk_poller *poller; /* completion poller */ |
51 | struct spdk_reduce_vol_params params; /* params for the reduce volume */ |
52 | struct spdk_reduce_backing_dev backing_dev; /* backing device info for the reduce volume */ |
53 | struct spdk_reduce_vol *vol; /* the reduce volume */ |
54 | struct vbdev_comp_delete_ctx *delete_ctx; |
55 | bool orphaned; /* base bdev claimed but comp_bdev not registered */ |
56 | int reduce_errno; |
57 | TAILQ_HEAD(, vbdev_comp_op) queued_comp_ops; |
58 | TAILQ_ENTRY(vbdev_compress) link; |
59 | struct spdk_thread *thread; /* thread where base device is opened */ |
60 | enum spdk_accel_comp_algo comp_algo; /* compression algorithm for compress bdev */ |
61 | uint32_t comp_level; /* compression algorithm level */ |
62 | bool init_failed; /* compress bdev initialization failed */ |
63 | }; |
64 | static TAILQ_HEAD(, vbdev_compress) g_vbdev_comp = TAILQ_HEAD_INITIALIZER(g_vbdev_comp); |
65 | |
66 | /* The comp vbdev channel struct. It is allocated and freed on my behalf by the io channel code. |
67 | */ |
68 | struct comp_io_channel { |
69 | struct spdk_io_channel_iter *iter; /* used with for_each_channel in reset */ |
70 | }; |
71 | |
72 | /* Records the unmap operation split status */ |
73 | struct comp_unmap_split { |
74 | uint64_t current_offset_blocks; |
75 | uint64_t remaining_num_blocks; |
76 | }; |
77 | |
78 | /* Per I/O context for the compression vbdev. */ |
79 | struct comp_bdev_io { |
80 | struct comp_io_channel *comp_ch; /* used in completion handling */ |
81 | struct vbdev_compress *comp_bdev; /* vbdev associated with this IO */ |
82 | struct spdk_bdev_io_wait_entry bdev_io_wait; /* for bdev_io_wait */ |
83 | struct spdk_bdev_io *orig_io; /* the original IO */ |
84 | struct comp_unmap_split split_io; /* save unmap op split io */ |
85 | int status; /* save for completion on orig thread */ |
86 | }; |
87 | |
88 | static void vbdev_compress_examine(struct spdk_bdev *bdev); |
89 | static int vbdev_compress_claim(struct vbdev_compress *comp_bdev); |
90 | struct vbdev_compress *_prepare_for_load_init(struct spdk_bdev_desc *bdev_desc, uint32_t lb_size, |
91 | uint8_t comp_algo, uint32_t comp_level); |
92 | static void vbdev_compress_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io); |
93 | static void comp_bdev_ch_destroy_cb(void *io_device, void *ctx_buf); |
94 | static void vbdev_compress_delete_done(void *cb_arg, int bdeverrno); |
95 | static void _comp_reduce_resubmit_backing_io(void *_backing_io); |
96 | |
97 | /* for completing rw requests on the orig IO thread. */ |
98 | static void |
99 | _reduce_rw_blocks_cb(void *arg) |
100 | { |
101 | struct comp_bdev_io *io_ctx = arg; |
102 | |
103 | if (spdk_likely(io_ctx->status == 0)) { |
104 | spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_SUCCESS); |
105 | } else if (io_ctx->status == -ENOMEM) { |
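/* The NOMEM status tells the bdev layer to queue this IO internally and retry it once outstanding IO completes. */ |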
106 | spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_NOMEM); |
107 | } else { |
108 | SPDK_ERRLOG("Failed to execute reduce api. %s\n", spdk_strerror(-io_ctx->status)); |
109 | spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_FAILED); |
110 | } |
111 | } |
112 | |
113 | /* Completion callback for r/w that were issued via reducelib. */ |
114 | static void |
115 | reduce_rw_blocks_cb(void *arg, int reduce_errno) |
116 | { |
117 | struct spdk_bdev_io *bdev_io = arg; |
118 | struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx; |
119 | struct spdk_io_channel *ch = spdk_io_channel_from_ctx(io_ctx->comp_ch); |
120 | struct spdk_thread *orig_thread; |
121 | |
122 | /* TODO: need to decide which error codes are bdev_io success vs failure; |
123 | * example examine calls reading metadata */ |
124 | |
125 | io_ctx->status = reduce_errno; |
126 | |
127 | /* Send this request to the orig IO thread. */ |
128 | orig_thread = spdk_io_channel_get_thread(ch); |
129 | |
130 | spdk_thread_exec_msg(orig_thread, _reduce_rw_blocks_cb, io_ctx); |
131 | } |
132 | |
133 | static int |
134 | _compress_operation(struct spdk_reduce_backing_dev *backing_dev, struct iovec *src_iovs, |
135 | int src_iovcnt, struct iovec *dst_iovs, |
136 | int dst_iovcnt, bool compress, void *cb_arg) |
137 | { |
138 | struct spdk_reduce_vol_cb_args *reduce_cb_arg = cb_arg; |
139 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(backing_dev, struct vbdev_compress, |
140 | backing_dev); |
141 | int rc; |
142 | |
143 | if (compress) { |
144 | assert(dst_iovcnt == 1); |
145 | rc = spdk_accel_submit_compress_ext(comp_bdev->accel_channel, dst_iovs[0].iov_base, |
146 | dst_iovs[0].iov_len, src_iovs, src_iovcnt, |
147 | comp_bdev->comp_algo, comp_bdev->comp_level, |
148 | &reduce_cb_arg->output_size, reduce_cb_arg->cb_fn, |
149 | reduce_cb_arg->cb_arg); |
150 | } else { |
151 | rc = spdk_accel_submit_decompress_ext(comp_bdev->accel_channel, dst_iovs, dst_iovcnt, |
152 | src_iovs, src_iovcnt, comp_bdev->comp_algo, |
153 | &reduce_cb_arg->output_size, reduce_cb_arg->cb_fn, |
154 | reduce_cb_arg->cb_arg); |
155 | } |
156 | |
157 | return rc; |
158 | } |
159 | |
160 | /* Entry point for reduce lib to issue a compress operation. */ |
161 | static void |
162 | _comp_reduce_compress(struct spdk_reduce_backing_dev *dev, |
163 | struct iovec *src_iovs, int src_iovcnt, |
164 | struct iovec *dst_iovs, int dst_iovcnt, |
165 | struct spdk_reduce_vol_cb_args *cb_arg) |
166 | { |
167 | int rc; |
168 | |
169 | rc = _compress_operation(dev, src_iovs, src_iovcnt, dst_iovs, dst_iovcnt, true, cb_arg); |
170 | if (rc) { |
171 | SPDK_ERRLOG("with compress operation code %d (%s)\n", rc, spdk_strerror(-rc)); |
172 | cb_arg->cb_fn(cb_arg->cb_arg, rc); |
173 | } |
174 | } |
175 | |
176 | /* Entry point for reduce lib to issue a decompress operation. */ |
177 | static void |
178 | _comp_reduce_decompress(struct spdk_reduce_backing_dev *dev, |
179 | struct iovec *src_iovs, int src_iovcnt, |
180 | struct iovec *dst_iovs, int dst_iovcnt, |
181 | struct spdk_reduce_vol_cb_args *cb_arg) |
182 | { |
183 | int rc; |
184 | |
185 | rc = _compress_operation(dev, src_iovs, src_iovcnt, dst_iovs, dst_iovcnt, false, cb_arg); |
186 | if (rc) { |
187 | SPDK_ERRLOG("with decompress operation code %d (%s)\n", rc, spdk_strerror(-rc)); |
188 | cb_arg->cb_fn(cb_arg->cb_arg, rc); |
189 | } |
190 | } |
191 | |
192 | static void |
193 | _comp_submit_write(void *ctx) |
194 | { |
195 | struct spdk_bdev_io *bdev_io = ctx; |
196 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress, |
197 | comp_bdev); |
198 | |
199 | spdk_reduce_vol_writev(comp_bdev->vol, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, |
200 | bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, |
201 | reduce_rw_blocks_cb, bdev_io); |
202 | } |
203 | |
204 | static void |
205 | _comp_submit_read(void *ctx) |
206 | { |
207 | struct spdk_bdev_io *bdev_io = ctx; |
208 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress, |
209 | comp_bdev); |
210 | |
211 | spdk_reduce_vol_readv(comp_bdev->vol, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, |
212 | bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, |
213 | reduce_rw_blocks_cb, bdev_io); |
214 | } |
215 | |
216 | |
217 | /* Callback for getting a buf from the bdev pool in the event that the caller passed |
218 | * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module |
219 | * beneath us before we're done with it. |
220 | */ |
221 | static void |
222 | comp_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success) |
223 | { |
224 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress, |
225 | comp_bdev); |
226 | |
227 | if (spdk_unlikely(!success)) { |
228 | SPDK_ERRLOG("Failed to get data buffer\n"); |
229 | reduce_rw_blocks_cb(bdev_io, -ENOMEM); |
230 | return; |
231 | } |
232 | |
233 | spdk_thread_exec_msg(comp_bdev->reduce_thread, _comp_submit_read, bdev_io); |
234 | } |
235 | |
236 | static void _comp_submit_unmap_split(void *ctx); |
237 | |
238 | /* When running mkfs or fstrim, large unmap requests may be generated. |
239 | * A large request is split into multiple small unmap ops and processed recursively. |
240 | * Running too many small unmap ops recursively may overflow the stack or monopolize |
241 | * the thread, delaying other tasks. To avoid this, the next unmap op is processed |
242 | * asynchronously via 'spdk_thread_send_msg'. |
243 | */ |
244 | static void |
245 | _comp_submit_unmap_split_done(void *arg, int reduce_errno) |
246 | { |
247 | struct spdk_bdev_io *bdev_io = arg; |
248 | struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx; |
249 | struct spdk_io_channel *ch = spdk_io_channel_from_ctx(io_ctx->comp_ch); |
250 | struct spdk_thread *orig_thread; |
251 | |
252 | if (spdk_unlikely(reduce_errno != 0)) { |
253 | reduce_rw_blocks_cb(bdev_io, reduce_errno); |
254 | return; |
255 | } |
256 | |
257 | orig_thread = spdk_io_channel_get_thread(ch); |
258 | |
259 | if (spdk_unlikely(io_ctx->split_io.remaining_num_blocks > 0)) { |
260 | spdk_thread_send_msg(orig_thread, _comp_submit_unmap_split, bdev_io); |
261 | return; |
262 | } |
263 | assert(io_ctx->split_io.remaining_num_blocks == 0); |
264 | |
265 | reduce_rw_blocks_cb(bdev_io, reduce_errno); |
266 | } |
267 | |
268 | static void |
269 | _comp_submit_unmap_split(void *ctx) |
270 | { |
271 | struct spdk_bdev_io *bdev_io = ctx; |
272 | struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx; |
273 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress, |
274 | comp_bdev); |
275 | const struct spdk_reduce_vol_params *params = spdk_reduce_vol_get_params(comp_bdev->vol); |
276 | struct comp_unmap_split *split_io = &io_ctx->split_io; |
277 | uint64_t offset_blocks, num_blocks, optimal_io_boundary; |
278 | |
279 | assert(params->chunk_size % params->logical_block_size == 0); |
280 | optimal_io_boundary = params->chunk_size / params->logical_block_size; |
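/* e.g. with the default 16 KiB chunk, this is 32 blocks for 512-byte logical blocks or 4 blocks for 4 KiB blocks */ |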
281 | |
282 | offset_blocks = split_io->current_offset_blocks; |
283 | num_blocks = optimal_io_boundary - offset_blocks % optimal_io_boundary; |
Value stored to 'num_blocks' is never read (the boundary-aligned count computed on line 283 is immediately overwritten by the spdk_min() assignment on line 284) |
284 | num_blocks = spdk_min(optimal_io_boundary, split_io->remaining_num_blocks); |
285 | |
286 | spdk_reduce_vol_unmap(comp_bdev->vol, offset_blocks, num_blocks, |
287 | _comp_submit_unmap_split_done, bdev_io); |
288 | |
289 | split_io->current_offset_blocks += num_blocks; |
290 | split_io->remaining_num_blocks -= num_blocks; |
291 | } |
292 | |
293 | static void |
294 | _comp_submit_unmap(void *ctx) |
295 | { |
296 | struct spdk_bdev_io *bdev_io = ctx; |
297 | struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx; |
298 | |
299 | io_ctx->split_io.current_offset_blocks = bdev_io->u.bdev.offset_blocks; |
300 | io_ctx->split_io.remaining_num_blocks = bdev_io->u.bdev.num_blocks; |
301 | |
302 | _comp_submit_unmap_split(bdev_io); |
303 | } |
304 | |
305 | /* Called when someone above submits IO to this vbdev. */ |
306 | static void |
307 | vbdev_compress_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) |
308 | { |
309 | struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx; |
310 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress, |
311 | comp_bdev); |
312 | struct comp_io_channel *comp_ch = spdk_io_channel_get_ctx(ch); |
313 | |
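/* driver_ctx is the per-IO context area whose size this module advertises via vbdev_compress_get_ctx_size() */ |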
314 | memset(io_ctx, 0, sizeof(struct comp_bdev_io)); |
315 | io_ctx->comp_bdev = comp_bdev; |
316 | io_ctx->comp_ch = comp_ch; |
317 | io_ctx->orig_io = bdev_io; |
318 | |
319 | switch (bdev_io->type) { |
320 | case SPDK_BDEV_IO_TYPE_READ: |
321 | spdk_bdev_io_get_buf(bdev_io, comp_read_get_buf_cb, |
322 | bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); |
323 | return; |
324 | case SPDK_BDEV_IO_TYPE_WRITE: |
325 | spdk_thread_exec_msg(comp_bdev->reduce_thread, _comp_submit_write, bdev_io); |
326 | return; |
327 | case SPDK_BDEV_IO_TYPE_UNMAP: |
328 | spdk_thread_exec_msg(comp_bdev->reduce_thread, _comp_submit_unmap, bdev_io); |
329 | return; |
330 | /* TODO support RESET in future patch in the series */ |
331 | case SPDK_BDEV_IO_TYPE_RESET: |
332 | case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: |
333 | case SPDK_BDEV_IO_TYPE_FLUSH: |
334 | default: |
335 | SPDK_ERRLOG("Unknown I/O type %d\n", bdev_io->type); |
336 | spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_FAILED); |
337 | break; |
338 | } |
339 | } |
340 | |
341 | static bool |
342 | vbdev_compress_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type) |
343 | { |
344 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
345 | |
346 | switch (io_type) { |
347 | case SPDK_BDEV_IO_TYPE_READ: |
348 | case SPDK_BDEV_IO_TYPE_WRITE: |
349 | return spdk_bdev_io_type_supported(comp_bdev->base_bdev, io_type); |
350 | case SPDK_BDEV_IO_TYPE_UNMAP: |
351 | return true; |
352 | case SPDK_BDEV_IO_TYPE_RESET: |
353 | case SPDK_BDEV_IO_TYPE_FLUSH: |
354 | case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: |
355 | default: |
356 | return false; |
357 | } |
358 | } |
359 | |
360 | /* Callback for unregistering the IO device. */ |
361 | static void |
362 | _device_unregister_cb(void *io_device) |
363 | { |
364 | struct vbdev_compress *comp_bdev = io_device; |
365 | |
366 | /* Done with this comp_bdev. */ |
367 | pthread_mutex_destroy(&comp_bdev->reduce_lock); |
368 | free(comp_bdev->comp_bdev.name); |
369 | free(comp_bdev); |
370 | } |
371 | |
372 | static void |
373 | _vbdev_compress_destruct_cb(void *ctx) |
374 | { |
375 | struct vbdev_compress *comp_bdev = ctx; |
376 | |
377 | /* Close the underlying bdev on its same opened thread. */ |
378 | spdk_bdev_close(comp_bdev->base_desc); |
379 | comp_bdev->vol = NULL; |
380 | if (comp_bdev->init_failed) { |
381 | free(comp_bdev); |
382 | return; |
383 | } |
384 | |
385 | TAILQ_REMOVE(&g_vbdev_comp, comp_bdev, link); |
386 | spdk_bdev_module_release_bdev(comp_bdev->base_bdev); |
387 | |
388 | if (comp_bdev->orphaned == false) { |
389 | spdk_io_device_unregister(comp_bdev, _device_unregister_cb); |
390 | } else { |
391 | vbdev_compress_delete_done(comp_bdev->delete_ctx, 0); |
392 | _device_unregister_cb(comp_bdev); |
393 | } |
394 | } |
395 | |
396 | static void |
397 | vbdev_compress_destruct_cb(void *cb_arg, int reduce_errno) |
398 | { |
399 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg; |
400 | |
401 | if (reduce_errno) { |
402 | SPDK_ERRLOG("number %d\n", reduce_errno); |
403 | } else { |
404 | if (comp_bdev->thread && comp_bdev->thread != spdk_get_thread()) { |
405 | spdk_thread_send_msg(comp_bdev->thread, |
406 | _vbdev_compress_destruct_cb, comp_bdev); |
407 | } else { |
408 | _vbdev_compress_destruct_cb(comp_bdev); |
409 | } |
410 | } |
411 | } |
412 | |
413 | static void |
414 | _reduce_destroy_cb(void *ctx, int reduce_errno) |
415 | { |
416 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
417 | |
418 | if (reduce_errno) { |
419 | SPDK_ERRLOG("number %d\n", reduce_errno); |
420 | } |
421 | |
422 | comp_bdev->vol = NULL; |
423 | spdk_put_io_channel(comp_bdev->base_ch); |
424 | if (comp_bdev->init_failed || comp_bdev->orphaned) { |
425 | vbdev_compress_destruct_cb((void *)comp_bdev, 0); |
426 | } else { |
427 | spdk_bdev_unregister(&comp_bdev->comp_bdev, vbdev_compress_delete_done, |
428 | comp_bdev->delete_ctx); |
429 | } |
430 | |
431 | } |
432 | |
433 | static void |
434 | _delete_vol_unload_cb(void *ctx) |
435 | { |
436 | struct vbdev_compress *comp_bdev = ctx; |
437 | |
438 | /* FIXME: Assert if these conditions are not satisfied for now. */ |
439 | assert(!comp_bdev->reduce_thread || |
440 | comp_bdev->reduce_thread == spdk_get_thread()); |
441 | |
442 | /* reducelib needs a channel to comm with the backing device */ |
443 | comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc); |
444 | |
445 | /* Clean the device before we free our resources. */ |
446 | spdk_reduce_vol_destroy(&comp_bdev->backing_dev, _reduce_destroy_cb, comp_bdev); |
447 | } |
448 | |
449 | /* Called by reduceLib after performing unload vol actions */ |
450 | static void |
451 | delete_vol_unload_cb(void *cb_arg, int reduce_errno) |
452 | { |
453 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg; |
454 | |
455 | if (reduce_errno) { |
456 | SPDK_ERRLOG("Failed to unload vol, error %s\n", spdk_strerror(-reduce_errno)); |
457 | vbdev_compress_delete_done(comp_bdev->delete_ctx, reduce_errno); |
458 | return; |
459 | } |
460 | |
461 | pthread_mutex_lock(&comp_bdev->reduce_lock); |
462 | if (comp_bdev->reduce_thread && comp_bdev->reduce_thread != spdk_get_thread()) { |
463 | spdk_thread_send_msg(comp_bdev->reduce_thread, |
464 | _delete_vol_unload_cb, comp_bdev); |
465 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
466 | } else { |
467 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
468 | |
469 | _delete_vol_unload_cb(comp_bdev); |
470 | } |
471 | } |
472 | |
473 | const char * |
474 | compress_get_name(const struct vbdev_compress *comp_bdev) |
475 | { |
476 | return comp_bdev->comp_bdev.name; |
477 | } |
478 | |
479 | struct vbdev_compress * |
480 | compress_bdev_first(void) |
481 | { |
482 | struct vbdev_compress *comp_bdev; |
483 | |
484 | comp_bdev = TAILQ_FIRST(&g_vbdev_comp); |
485 | |
486 | return comp_bdev; |
487 | } |
488 | |
489 | struct vbdev_compress * |
490 | compress_bdev_next(struct vbdev_compress *prev) |
491 | { |
492 | struct vbdev_compress *comp_bdev; |
493 | |
494 | comp_bdev = TAILQ_NEXT(prev, link); |
495 | |
496 | return comp_bdev; |
497 | } |
498 | |
499 | bool |
500 | compress_has_orphan(const char *name) |
501 | { |
502 | struct vbdev_compress *comp_bdev; |
503 | |
504 | TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) { |
505 | if (comp_bdev->orphaned && strcmp(name, comp_bdev->comp_bdev.name) == 0) { |
506 | return true; |
507 | } |
508 | } |
509 | return false; |
510 | } |
511 | |
512 | /* Called after we've unregistered following a hot remove callback. |
513 | * Our finish entry point will be called next. |
514 | */ |
515 | static int |
516 | vbdev_compress_destruct(void *ctx) |
517 | { |
518 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
519 | |
520 | if (comp_bdev->vol != NULL) { |
521 | /* Tell reducelib that we're done with this volume. */ |
522 | spdk_reduce_vol_unload(comp_bdev->vol, vbdev_compress_destruct_cb, comp_bdev); |
523 | } else { |
524 | vbdev_compress_destruct_cb(comp_bdev, 0); |
525 | } |
526 | |
527 | return 0; |
528 | } |
529 | |
530 | /* We supplied this as an entry point for upper layers who want to communicate to this |
531 | * bdev. This is how they get a channel. |
532 | */ |
533 | static struct spdk_io_channel * |
534 | vbdev_compress_get_io_channel(void *ctx) |
535 | { |
536 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
537 | |
538 | /* The IO channel code will allocate a channel for us which consists of |
539 | * the SPDK channel structure plus the size of our comp_io_channel struct |
540 | * that we passed in when we registered our IO device. It will then call |
541 | * our channel create callback to populate any elements that we need to |
542 | * update. |
543 | */ |
544 | return spdk_get_io_channel(comp_bdev); |
545 | } |
546 | |
547 | /* This is the output for bdev_get_bdevs() for this vbdev */ |
548 | static int |
549 | vbdev_compress_dump_info_json(void *ctx, struct spdk_json_write_ctx *w) |
550 | { |
551 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
552 | const struct spdk_reduce_vol_info *vol_info; |
553 | char *comp_algo = NULL; |
554 | |
555 | if (comp_bdev->params.comp_algo == SPDK_ACCEL_COMP_ALGO_LZ4) { |
556 | comp_algo = "lz4"; |
557 | } else if (comp_bdev->params.comp_algo == SPDK_ACCEL_COMP_ALGO_DEFLATE) { |
558 | comp_algo = "deflate"; |
559 | } else { |
560 | assert(false); |
561 | } |
562 | |
563 | spdk_json_write_name(w, "compress"); |
564 | spdk_json_write_object_begin(w); |
565 | spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&comp_bdev->comp_bdev)); |
566 | spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(comp_bdev->base_bdev)); |
567 | spdk_json_write_named_string(w, "pm_path", spdk_reduce_vol_get_pm_path(comp_bdev->vol)); |
568 | spdk_json_write_named_string(w, "comp_algo", comp_algo); |
569 | spdk_json_write_named_uint32(w, "comp_level", comp_bdev->params.comp_level); |
570 | spdk_json_write_named_uint32(w, "chunk_size", comp_bdev->params.chunk_size); |
571 | spdk_json_write_named_uint32(w, "backing_io_unit_size", comp_bdev->params.backing_io_unit_size); |
572 | vol_info = spdk_reduce_vol_get_info(comp_bdev->vol); |
573 | spdk_json_write_named_uint64(w, "allocated_io_units", vol_info->allocated_io_units); |
574 | spdk_json_write_object_end(w); |
575 | |
576 | return 0; |
577 | } |
578 | |
579 | static int |
580 | vbdev_compress_config_json(struct spdk_json_write_ctx *w) |
581 | { |
582 | /* Nothing to dump as compress bdev configuration is saved on physical device. */ |
583 | return 0; |
584 | } |
585 | |
586 | struct vbdev_init_reduce_ctx { |
587 | struct vbdev_compress *comp_bdev; |
588 | int status; |
589 | bdev_compress_create_cb cb_fn; |
590 | void *cb_ctx; |
591 | }; |
592 | |
593 | static void |
594 | _cleanup_vol_unload_cb(void *ctx) |
595 | { |
596 | struct vbdev_compress *comp_bdev = ctx; |
597 | |
598 | assert(!comp_bdev->reduce_thread || |
599 | comp_bdev->reduce_thread == spdk_get_thread()); |
600 | |
601 | comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc); |
602 | |
603 | spdk_reduce_vol_destroy(&comp_bdev->backing_dev, _reduce_destroy_cb, comp_bdev); |
604 | } |
605 | |
606 | static void |
607 | init_vol_unload_cb(void *ctx, int reduce_errno) |
608 | { |
609 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx; |
610 | |
611 | if (reduce_errno) { |
612 | SPDK_ERRLOG("Failed to unload vol, error %s\n", spdk_strerror(-reduce_errno)); |
613 | } |
614 | |
615 | pthread_mutex_lock(&comp_bdev->reduce_lock); |
616 | if (comp_bdev->reduce_thread && comp_bdev->reduce_thread != spdk_get_thread()) { |
617 | spdk_thread_send_msg(comp_bdev->reduce_thread, |
618 | _cleanup_vol_unload_cb, comp_bdev); |
619 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
620 | } else { |
621 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
622 | |
623 | _cleanup_vol_unload_cb(comp_bdev); |
624 | } |
625 | } |
626 | |
627 | static void |
628 | _vbdev_reduce_init_cb(void *ctx) |
629 | { |
630 | struct vbdev_init_reduce_ctx *init_ctx = ctx; |
631 | struct vbdev_compress *comp_bdev = init_ctx->comp_bdev; |
632 | int rc = init_ctx->status; |
633 | |
634 | assert(comp_bdev->base_desc != NULL); |
635 | |
636 | /* We're done with metadata operations */ |
637 | spdk_put_io_channel(comp_bdev->base_ch); |
638 | |
639 | if (rc != 0) { |
640 | goto err; |
641 | } |
642 | |
643 | assert(comp_bdev->vol != NULL); |
644 | |
645 | rc = vbdev_compress_claim(comp_bdev); |
646 | if (rc != 0) { |
647 | comp_bdev->init_failed = true; |
648 | spdk_reduce_vol_unload(comp_bdev->vol, init_vol_unload_cb, comp_bdev); |
649 | } |
650 | |
651 | init_ctx->cb_fn(init_ctx->cb_ctx, rc); |
652 | free(init_ctx); |
653 | return; |
654 | |
655 | err: |
656 | init_ctx->cb_fn(init_ctx->cb_ctx, rc); |
657 | /* Close the underlying bdev on its same opened thread. */ |
658 | spdk_bdev_close(comp_bdev->base_desc); |
659 | free(comp_bdev); |
660 | free(init_ctx); |
661 | } |
662 | |
663 | /* Callback from reduce for when init is complete. We'll pass the vbdev_comp struct |
664 | * used for initial metadata operations to claim where it will be further filled out |
665 | * and added to the global list. |
666 | */ |
667 | static void |
668 | vbdev_reduce_init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno) |
669 | { |
670 | struct vbdev_init_reduce_ctx *init_ctx = cb_arg; |
671 | struct vbdev_compress *comp_bdev = init_ctx->comp_bdev; |
672 | |
673 | if (reduce_errno == 0) { |
674 | comp_bdev->vol = vol; |
675 | } else { |
676 | SPDK_ERRLOG("for vol %s, error %s\n", |
677 | spdk_bdev_get_name(comp_bdev->base_bdev), spdk_strerror(-reduce_errno)); |
678 | } |
679 | |
680 | init_ctx->status = reduce_errno; |
681 | |
682 | if (comp_bdev->thread && comp_bdev->thread != spdk_get_thread()) { |
683 | spdk_thread_send_msg(comp_bdev->thread, _vbdev_reduce_init_cb, init_ctx); |
684 | } else { |
685 | _vbdev_reduce_init_cb(init_ctx); |
686 | } |
687 | } |
688 | |
689 | /* Callback for the function used by reduceLib to perform IO to/from the backing device. We just |
690 | * call the callback provided by reduceLib when it called the read/write/unmap function and |
691 | * free the bdev_io. |
692 | */ |
693 | static void |
694 | comp_reduce_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg) |
695 | { |
696 | struct spdk_reduce_vol_cb_args *cb_args = arg; |
697 | int reduce_errno; |
698 | |
699 | if (success) { |
700 | reduce_errno = 0; |
701 | } else { |
702 | reduce_errno = -EIO; |
703 | } |
704 | spdk_bdev_free_io(bdev_io); |
705 | cb_args->cb_fn(cb_args->cb_arg, reduce_errno); |
706 | } |
707 | |
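/* Used when the base bdev returns -ENOMEM: park the backing IO on the bdev's io_wait queue so that _comp_reduce_resubmit_backing_io re-drives it once resources free up. */ |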
708 | static void |
709 | _comp_backing_bdev_queue_io_wait(struct vbdev_compress *comp_bdev, |
710 | struct spdk_reduce_backing_io *backing_io) |
711 | { |
712 | struct spdk_bdev_io_wait_entry *waitq_entry; |
713 | int rc; |
714 | |
715 | waitq_entry = (struct spdk_bdev_io_wait_entry *) &backing_io->user_ctx; |
716 | waitq_entry->bdev = spdk_bdev_desc_get_bdev(comp_bdev->base_desc); |
717 | waitq_entry->cb_fn = _comp_reduce_resubmit_backing_io; |
718 | waitq_entry->cb_arg = backing_io; |
719 | |
720 | rc = spdk_bdev_queue_io_wait(waitq_entry->bdev, comp_bdev->base_ch, waitq_entry); |
721 | if (rc) { |
722 | SPDK_ERRLOG("Queue io failed in _comp_backing_bdev_queue_io_wait, rc=%d.\n", rc); |
723 | assert(false); |
724 | backing_io->backing_cb_args->cb_fn(backing_io->backing_cb_args->cb_arg, rc); |
725 | } |
726 | } |
727 | |
728 | static void |
729 | _comp_backing_bdev_read(struct spdk_reduce_backing_io *backing_io) |
730 | { |
731 | struct spdk_reduce_vol_cb_args *backing_cb_args = backing_io->backing_cb_args; |
732 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(backing_io->dev, struct vbdev_compress, |
733 | backing_dev); |
734 | int rc; |
735 | |
736 | rc = spdk_bdev_readv_blocks(comp_bdev->base_desc, comp_bdev->base_ch, |
737 | backing_io->iov, backing_io->iovcnt, |
738 | backing_io->lba, backing_io->lba_count, |
739 | comp_reduce_io_cb, |
740 | backing_cb_args); |
741 | |
742 | if (rc) { |
743 | if (rc == -ENOMEM) { |
744 | _comp_backing_bdev_queue_io_wait(comp_bdev, backing_io); |
745 | return; |
746 | } else { |
747 | SPDK_ERRLOG("submitting readv request, rc=%d\n", rc); |
748 | } |
749 | backing_cb_args->cb_fn(backing_cb_args->cb_arg, rc); |
750 | } |
751 | } |
752 | |
753 | static void |
754 | _comp_backing_bdev_write(struct spdk_reduce_backing_io *backing_io) |
755 | { |
756 | struct spdk_reduce_vol_cb_args *backing_cb_args = backing_io->backing_cb_args; |
757 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(backing_io->dev, struct vbdev_compress, |
758 | backing_dev); |
759 | int rc; |
760 | |
761 | rc = spdk_bdev_writev_blocks(comp_bdev->base_desc, comp_bdev->base_ch, |
762 | backing_io->iov, backing_io->iovcnt, |
763 | backing_io->lba, backing_io->lba_count, |
764 | comp_reduce_io_cb, |
765 | backing_cb_args); |
766 | |
767 | if (rc) { |
768 | if (rc == -ENOMEM) { |
769 | _comp_backing_bdev_queue_io_wait(comp_bdev, backing_io); |
770 | return; |
771 | } else { |
772 | SPDK_ERRLOG("error submitting writev request, rc=%d\n", rc); |
773 | } |
774 | backing_cb_args->cb_fn(backing_cb_args->cb_arg, rc); |
775 | } |
776 | } |
777 | |
778 | static void |
779 | _comp_backing_bdev_unmap(struct spdk_reduce_backing_io *backing_io) |
780 | { |
781 | struct spdk_reduce_vol_cb_args *backing_cb_args = backing_io->backing_cb_args; |
782 | struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(backing_io->dev, struct vbdev_compress, |
783 | backing_dev); |
784 | int rc; |
785 | |
786 | rc = spdk_bdev_unmap_blocks(comp_bdev->base_desc, comp_bdev->base_ch, |
787 | backing_io->lba, backing_io->lba_count, |
788 | comp_reduce_io_cb, |
789 | backing_cb_args); |
790 | |
791 | if (rc) { |
792 | if (rc == -ENOMEM) { |
793 | _comp_backing_bdev_queue_io_wait(comp_bdev, backing_io); |
794 | return; |
795 | } else { |
796 | SPDK_ERRLOG("submitting unmap request, rc=%d\n", rc); |
797 | } |
798 | backing_cb_args->cb_fn(backing_cb_args->cb_arg, rc); |
799 | } |
800 | } |
801 | |
802 | /* This is the function provided to the reduceLib for sending reads/writes/unmaps |
803 | * directly to the backing device. |
804 | */ |
805 | static void |
806 | _comp_reduce_submit_backing_io(struct spdk_reduce_backing_io *backing_io) |
807 | { |
808 | switch (backing_io->backing_io_type) { |
809 | case SPDK_REDUCE_BACKING_IO_WRITE: |
810 | _comp_backing_bdev_write(backing_io); |
811 | break; |
812 | case SPDK_REDUCE_BACKING_IO_READ: |
813 | _comp_backing_bdev_read(backing_io); |
814 | break; |
815 | case SPDK_REDUCE_BACKING_IO_UNMAP: |
816 | _comp_backing_bdev_unmap(backing_io); |
817 | break; |
818 | default: |
819 | SPDK_ERRLOG("Unknown I/O type %d\n", backing_io->backing_io_type); |
820 | backing_io->backing_cb_args->cb_fn(backing_io->backing_cb_args->cb_arg, -EINVAL); |
821 | break; |
822 | } |
823 | } |
824 | |
825 | static void |
826 | _comp_reduce_resubmit_backing_io(void *_backing_io) |
827 | { |
828 | struct spdk_reduce_backing_io *backing_io = _backing_io; |
829 | |
830 | _comp_reduce_submit_backing_io(backing_io); |
831 | } |
832 | |
833 | /* Called by reduceLib after performing unload vol actions following base bdev hotremove */ |
834 | static void |
835 | bdev_hotremove_vol_unload_cb(void *cb_arg, int reduce_errno) |
836 | { |
837 | struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg; |
838 | |
839 | if (reduce_errno) { |
840 | SPDK_ERRLOG("number %d\n", reduce_errno); |
841 | } |
842 | |
843 | comp_bdev->vol = NULL; |
844 | spdk_bdev_unregister(&comp_bdev->comp_bdev, NULL, NULL); |
845 | } |
846 | |
847 | static void |
848 | vbdev_compress_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find) |
849 | { |
850 | struct vbdev_compress *comp_bdev, *tmp; |
851 | |
852 | TAILQ_FOREACH_SAFE(comp_bdev, &g_vbdev_comp, link, tmp) { |
853 | if (bdev_find == comp_bdev->base_bdev) { |
854 | /* Tell reduceLib that we're done with this volume. */ |
855 | spdk_reduce_vol_unload(comp_bdev->vol, bdev_hotremove_vol_unload_cb, comp_bdev); |
856 | } |
857 | } |
858 | } |
859 | |
860 | /* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */ |
861 | static void |
862 | vbdev_compress_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, |
863 | void *event_ctx) |
864 | { |
865 | switch (type) { |
866 | case SPDK_BDEV_EVENT_REMOVE: |
867 | vbdev_compress_base_bdev_hotremove_cb(bdev); |
868 | break; |
869 | default: |
870 | SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); |
871 | break; |
872 | } |
873 | } |
874 | |
875 | /* TODO: determine which params we want user configurable; hardcoded for now: |
876 | * params.vol_size |
877 | * params.chunk_size |
878 | * compression PMD, algorithm, window size, comp level, etc. |
879 | * DEV_MD_PATH |
880 | */ |
881 | |
882 | /* Common function for init and load to allocate and populate the minimal |
883 | * information for reducelib to init or load. |
884 | */ |
885 | struct vbdev_compress * |
886 | _prepare_for_load_init(struct spdk_bdev_desc *bdev_desc, uint32_t lb_size, uint8_t comp_algo, |
887 | uint32_t comp_level) |
888 | { |
889 | struct vbdev_compress *comp_bdev; |
890 | struct spdk_bdev *bdev; |
891 | |
892 | comp_bdev = calloc(1, sizeof(struct vbdev_compress)); |
893 | if (comp_bdev == NULL) { |
894 | SPDK_ERRLOG("failed to alloc comp_bdev\n"); |
895 | return NULL; |
896 | } |
897 | |
898 | comp_bdev->backing_dev.submit_backing_io = _comp_reduce_submit_backing_io; |
899 | comp_bdev->backing_dev.compress = _comp_reduce_compress; |
900 | comp_bdev->backing_dev.decompress = _comp_reduce_decompress; |
901 | |
902 | comp_bdev->base_desc = bdev_desc; |
903 | bdev = spdk_bdev_desc_get_bdev(bdev_desc); |
904 | comp_bdev->base_bdev = bdev; |
905 | |
906 | comp_bdev->backing_dev.blocklen = bdev->blocklen; |
907 | comp_bdev->backing_dev.blockcnt = bdev->blockcnt; |
908 | |
909 | comp_bdev->backing_dev.user_ctx_size = sizeof(struct spdk_bdev_io_wait_entry); |
910 | |
911 | comp_bdev->comp_algo = comp_algo; |
912 | comp_bdev->comp_level = comp_level; |
913 | comp_bdev->params.comp_algo = comp_algo; |
914 | comp_bdev->params.comp_level = comp_level; |
915 | comp_bdev->params.chunk_size = CHUNK_SIZE; |
916 | if (lb_size == 0) { |
917 | comp_bdev->params.logical_block_size = bdev->blocklen; |
918 | } else { |
919 | comp_bdev->params.logical_block_size = lb_size; |
920 | } |
921 | |
922 | comp_bdev->params.backing_io_unit_size = BACKING_IO_SZ; |
923 | return comp_bdev; |
924 | } |
925 | |
926 | /* Call reducelib to initialize a new volume */ |
927 | static int |
928 | vbdev_init_reduce(const char *bdev_name, const char *pm_path, uint32_t lb_size, uint8_t comp_algo, |
929 | uint32_t comp_level, bdev_compress_create_cb cb_fn, void *cb_arg) |
930 | { |
931 | struct spdk_bdev_desc *bdev_desc = NULL; |
932 | struct vbdev_init_reduce_ctx *init_ctx; |
933 | struct vbdev_compress *comp_bdev; |
934 | int rc; |
935 | |
936 | init_ctx = calloc(1, sizeof(*init_ctx)); |
937 | if (init_ctx == NULL) { |
938 | SPDK_ERRLOG("failed to alloc init contexts\n"); |
939 | return -ENOMEM; |
940 | } |
941 | |
942 | init_ctx->cb_fn = cb_fn; |
943 | init_ctx->cb_ctx = cb_arg; |
944 | |
945 | rc = spdk_bdev_open_ext(bdev_name, true, vbdev_compress_base_bdev_event_cb, |
946 | NULL, &bdev_desc); |
947 | if (rc) { |
948 | SPDK_ERRLOG("could not open bdev %s, error %s\n", bdev_name, spdk_strerror(-rc)); |
949 | free(init_ctx); |
950 | return rc; |
951 | } |
952 | |
953 | comp_bdev = _prepare_for_load_init(bdev_desc, lb_size, comp_algo, comp_level); |
954 | if (comp_bdev == NULL) { |
955 | free(init_ctx); |
956 | spdk_bdev_close(bdev_desc); |
957 | return -EINVAL; |
958 | } |
959 | |
960 | init_ctx->comp_bdev = comp_bdev; |
961 | |
962 | /* Save the thread where the base device is opened */ |
963 | comp_bdev->thread = spdk_get_thread(); |
964 | |
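/* this channel serves the reduce metadata IO issued during init; it is released in _vbdev_reduce_init_cb */ |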
965 | comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc); |
966 | |
967 | spdk_reduce_vol_init(&comp_bdev->params, &comp_bdev->backing_dev, |
968 | pm_path, |
969 | vbdev_reduce_init_cb, |
970 | init_ctx); |
971 | return 0; |
972 | } |
973 | |
974 | /* We provide this callback for the SPDK channel code to create a channel using |
975 | * the channel struct we provided in our module get_io_channel() entry point. Here |
976 | * we get and save off an underlying base channel of the device below us so that |
977 | * we can communicate with the base bdev on a per channel basis. If we needed |
978 | * our own poller for this vbdev, we'd register it here. |
979 | */ |
980 | static int |
981 | comp_bdev_ch_create_cb(void *io_device, void *ctx_buf) |
982 | { |
983 | struct vbdev_compress *comp_bdev = io_device; |
984 | |
985 | /* Now set the reduce channel if it's not already set. */ |
986 | pthread_mutex_lock(&comp_bdev->reduce_lock); |
987 | if (comp_bdev->ch_count == 0) { |
988 | /* We use this queue to track outstanding IO in our layer. */ |
989 | TAILQ_INIT(&comp_bdev->pending_comp_ios); |
990 | |
991 | /* We use this to queue up compression operations as needed. */ |
992 | TAILQ_INIT(&comp_bdev->queued_comp_ops); |
993 | |
994 | comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc); |
995 | comp_bdev->reduce_thread = spdk_get_thread(); |
996 | comp_bdev->accel_channel = spdk_accel_get_io_channel(); |
997 | } |
998 | comp_bdev->ch_count++; |
999 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
1000 | |
1001 | return 0; |
1002 | } |
1003 | |
1004 | static void |
1005 | _channel_cleanup(struct vbdev_compress *comp_bdev) |
1006 | { |
1007 | spdk_put_io_channel(comp_bdev->base_ch); |
1008 | spdk_put_io_channel(comp_bdev->accel_channel); |
1009 | comp_bdev->reduce_thread = NULL; |
1010 | } |
1011 | |
1012 | /* Used to reroute destroy_ch to the correct thread */ |
1013 | static void |
1014 | _comp_bdev_ch_destroy_cb(void *arg) |
1015 | { |
1016 | struct vbdev_compress *comp_bdev = arg; |
1017 | |
1018 | pthread_mutex_lock(&comp_bdev->reduce_lock); |
1019 | _channel_cleanup(comp_bdev); |
1020 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
1021 | } |
1022 | |
1023 | /* We provide this callback for the SPDK channel code to destroy a channel |
1024 | * created with our create callback. We just need to undo anything we did |
1025 | * when we created. If this bdev used its own poller, we'd unregister it here. |
1026 | */ |
1027 | static void |
1028 | comp_bdev_ch_destroy_cb(void *io_device, void *ctx_buf) |
1029 | { |
1030 | struct vbdev_compress *comp_bdev = io_device; |
1031 | |
1032 | pthread_mutex_lock(&comp_bdev->reduce_lock); |
1033 | comp_bdev->ch_count--; |
1034 | if (comp_bdev->ch_count == 0) { |
1035 | /* Send this request to the thread where the channel was created. */ |
1036 | if (comp_bdev->reduce_thread != spdk_get_thread()) { |
1037 | spdk_thread_send_msg(comp_bdev->reduce_thread, |
1038 | _comp_bdev_ch_destroy_cb, comp_bdev); |
1039 | } else { |
1040 | _channel_cleanup(comp_bdev); |
1041 | } |
1042 | } |
1043 | pthread_mutex_unlock(&comp_bdev->reduce_lock); |
1044 | } |
1045 | |
1046 | static int |
1047 | _check_compress_bdev_comp_algo(enum spdk_accel_comp_algo algo, uint32_t comp_level) |
1048 | { |
1049 | uint32_t min_level, max_level; |
1050 | int rc; |
1051 | |
1052 | rc = spdk_accel_get_compress_level_range(algo, &min_level, &max_level); |
1053 | if (rc != 0) { |
1054 | return rc; |
1055 | } |
1056 | |
1057 | /* If both min_level and max_level are 0, the compression level can be ignored. |
1058 | * The back-end implementation hardcodes the compression level. |
1059 | */ |
1060 | if (min_level == 0 && max_level == 0) { |
1061 | return 0; |
1062 | } |
1063 | |
1064 | if (comp_level > max_level || comp_level < min_level) { |
1065 | return -EINVAL; |
1066 | } |
1067 | |
1068 | return 0; |
1069 | } |
1070 | |
1071 | /* RPC entry point for compression vbdev creation. */ |
1072 | int |
1073 | create_compress_bdev(const char *bdev_name, const char *pm_path, uint32_t lb_size, |
1074 | uint8_t comp_algo, uint32_t comp_level, |
1075 | bdev_compress_create_cb cb_fn, void *cb_arg) |
1076 | { |
1077 | struct vbdev_compress *comp_bdev = NULL; |
1078 | struct stat info; |
1079 | int rc; |
1080 | |
1081 | if (stat(pm_path, &info) != 0) { |
1082 | SPDK_ERRLOG("PM path %s does not exist.\n", pm_path); |
1083 | return -EINVAL; |
1084 | } else if (!S_ISDIR(info.st_mode)) { |
1085 | SPDK_ERRLOG("PM path %s is not a directory.\n", pm_path); |
1086 | return -EINVAL; |
1087 | } |
1088 | |
1089 | if ((lb_size != 0) && (lb_size != LB_SIZE_4K) && (lb_size != LB_SIZE_512B)) { |
1090 | SPDK_ERRLOG("Logical block size must be 512 or 4096\n"); |
1091 | return -EINVAL; |
1092 | } |
1093 | |
1094 | rc = _check_compress_bdev_comp_algo(comp_algo, comp_level); |
1095 | if (rc != 0) { |
1096 | SPDK_ERRLOG("Compress bdev doesn't support compression algo(%u) or level(%u)\n", |
1097 | comp_algo, comp_level); |
1098 | return rc; |
1099 | } |
1100 | |
1101 | TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) { |
1102 | if (strcmp(bdev_name, comp_bdev->base_bdev->name) == 0) { |
1103 | SPDK_ERRLOG("Base bdev %s already being used for a compress bdev\n", bdev_name); |
1104 | return -EBUSY; |
1105 | } |
1106 | } |
1107 | return vbdev_init_reduce(bdev_name, pm_path, lb_size, comp_algo, comp_level, cb_fn, cb_arg); |
1108 | } |
1109 | |
1110 | static int |
1111 | vbdev_compress_init(void) |
1112 | { |
1113 | return 0; |
1114 | } |
1115 | |
1116 | /* Called when the entire module is being torn down. */ |
1117 | static void |
1118 | vbdev_compress_finish(void) |
1119 | { |
1120 | /* TODO: unload vol in a future patch */ |
1121 | } |
1122 | |
1123 | /* During init we'll be asked how much memory we'd like passed to us |
1124 | * in bdev_io structures as context. Here's where we specify how |
1125 | * much context we want per IO. |
1126 | */ |
1127 | static int |
1128 | vbdev_compress_get_ctx_size(void) |
1129 | { |
1130 | return sizeof(struct comp_bdev_io); |
1131 | } |
1132 | |
1133 | /* When we register our bdev this is how we specify our entry points. */ |
1134 | static const struct spdk_bdev_fn_table vbdev_compress_fn_table = { |
1135 | .destruct = vbdev_compress_destruct, |
1136 | .submit_request = vbdev_compress_submit_request, |
1137 | .io_type_supported = vbdev_compress_io_type_supported, |
1138 | .get_io_channel = vbdev_compress_get_io_channel, |
1139 | .dump_info_json = vbdev_compress_dump_info_json, |
1140 | .write_config_json = NULL, |
1141 | }; |
1142 | |
1143 | static struct spdk_bdev_module compress_if = { |
1144 | .name = "compress", |
1145 | .module_init = vbdev_compress_init, |
1146 | .get_ctx_size = vbdev_compress_get_ctx_size, |
1147 | .examine_disk = vbdev_compress_examine, |
1148 | .module_fini = vbdev_compress_finish, |
1149 | .config_json = vbdev_compress_config_json |
1150 | }; |
1151 | |
1152 | SPDK_BDEV_MODULE_REGISTER(compress, &compress_if) |
1153 | |
1154 | static int _set_compbdev_name(struct vbdev_compress *comp_bdev) |
1155 | { |
1156 | struct spdk_bdev_alias *aliases; |
1157 | |
1158 | if (!TAILQ_EMPTY(spdk_bdev_get_aliases(comp_bdev->base_bdev))) { |
1159 | aliases = TAILQ_FIRST(spdk_bdev_get_aliases(comp_bdev->base_bdev)); |
1160 | comp_bdev->comp_bdev.name = spdk_sprintf_alloc("COMP_%s", aliases->alias.name); |
1161 | if (!comp_bdev->comp_bdev.name) { |
1162 | SPDK_ERRLOG("could not allocate comp_bdev name for alias\n"); |
1163 | return -ENOMEM; |
1164 | } |
1165 | } else { |
1166 | comp_bdev->comp_bdev.name = spdk_sprintf_alloc("COMP_%s", comp_bdev->base_bdev->name); |
1167 | if (!comp_bdev->comp_bdev.name) { |
1168 | SPDK_ERRLOG("could not allocate comp_bdev name for unique name\n"); |
1169 | return -ENOMEM; |
1170 | } |
1171 | } |
1172 | return 0; |
1173 | } |
1174 | |
1175 | static int |
1176 | vbdev_compress_claim(struct vbdev_compress *comp_bdev) |
1177 | { |
1178 | struct spdk_uuid ns_uuid; |
1179 | int rc; |
1180 | |
1181 | if (_set_compbdev_name(comp_bdev)) { |
1182 | return -EINVAL; |
1183 | } |
1184 | |
1185 | /* Note: some of the fields below will change in the future - for example, |
1186 | * blockcnt specifically will not match (the compressed volume size will |
1187 | * be slightly less than the base bdev size) |
1188 | */ |
1189 | comp_bdev->comp_bdev.product_name = COMP_BDEV_NAME; |
1190 | comp_bdev->comp_bdev.write_cache = comp_bdev->base_bdev->write_cache; |
1191 | |
1192 | comp_bdev->comp_bdev.optimal_io_boundary = |
1193 | comp_bdev->params.chunk_size / comp_bdev->params.logical_block_size; |
1194 | |
1195 | comp_bdev->comp_bdev.split_on_optimal_io_boundary = true; |
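/* with the boundary set to one chunk, the bdev layer splits incoming IO so that no single request spans a reduce chunk */ |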
1196 | |
1197 | comp_bdev->comp_bdev.blocklen = comp_bdev->params.logical_block_size; |
1198 | comp_bdev->comp_bdev.blockcnt = comp_bdev->params.vol_size / comp_bdev->comp_bdev.blocklen; |
1199 | assert(comp_bdev->comp_bdev.blockcnt > 0); |
1200 | |
1201 | /* This is the context that is passed to us when the bdev |
1202 | * layer calls in so we'll save our comp_bdev node here. |
1203 | */ |
1204 | comp_bdev->comp_bdev.ctxt = comp_bdev; |
1205 | comp_bdev->comp_bdev.fn_table = &vbdev_compress_fn_table; |
1206 | comp_bdev->comp_bdev.module = &compress_if; |
1207 | |
1208 | /* Generate UUID based on namespace UUID + base bdev UUID. */ |
1209 | spdk_uuid_parse(&ns_uuid, BDEV_COMPRESS_NAMESPACE_UUID"c3fad6da-832f-4cc0-9cdc-5c552b225e7b"); |
1210 | rc = spdk_uuid_generate_sha1(&comp_bdev->comp_bdev.uuid, &ns_uuid, |
1211 | (const char *)&comp_bdev->base_bdev->uuid, sizeof(struct spdk_uuid)); |
1212 | if (rc) { |
1213 | SPDK_ERRLOG("Unable to generate new UUID for compress bdev, error %s\n", spdk_strerror(-rc))spdk_log(SPDK_LOG_ERROR, "vbdev_compress.c", 1213, __func__, "Unable to generate new UUID for compress bdev, error %s\n" , spdk_strerror(-rc)); |
1214 | return -EINVAL22; |
1215 | } |
1216 | |
1217 | pthread_mutex_init(&comp_bdev->reduce_lock, NULL((void*)0)); |
1218 | |
1219 | /* Save the thread where the base device is opened */ |
1220 | comp_bdev->thread = spdk_get_thread(); |
1221 | |
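	/* Register as an io_device so the bdev layer can create a per-thread
	 * comp_io_channel for us via the create/destroy callbacks below.
	 */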
	spdk_io_device_register(comp_bdev, comp_bdev_ch_create_cb, comp_bdev_ch_destroy_cb,
				sizeof(struct comp_io_channel),
				comp_bdev->comp_bdev.name);

	rc = spdk_bdev_module_claim_bdev(comp_bdev->base_bdev, comp_bdev->base_desc,
					 comp_bdev->comp_bdev.module);
	if (rc) {
		SPDK_ERRLOG("could not claim bdev %s, error %s\n", spdk_bdev_get_name(comp_bdev->base_bdev),
			    spdk_strerror(-rc));
		goto error_claim;
	}

	rc = spdk_bdev_register(&comp_bdev->comp_bdev);
	if (rc < 0) {
		SPDK_ERRLOG("trying to register bdev, error %s\n", spdk_strerror(-rc));
		goto error_bdev_register;
	}

	TAILQ_INSERT_TAIL(&g_vbdev_comp, comp_bdev, link);

	SPDK_NOTICELOG("registered io_device and virtual bdev for: %s\n", comp_bdev->comp_bdev.name);

	return 0;

	/* Error cleanup paths. */
error_bdev_register:
	spdk_bdev_module_release_bdev(comp_bdev->base_bdev);
error_claim:
	spdk_io_device_unregister(comp_bdev, NULL);
	free(comp_bdev->comp_bdev.name);
	return rc;
}

static void
_vbdev_compress_delete_done(void *_ctx)
{
	struct vbdev_comp_delete_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, ctx->cb_rc);

	free(ctx);
}

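/* Unload completion for delete: stash the result and make sure the user's
 * callback runs on the thread that originally issued the delete.
 */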
static void
vbdev_compress_delete_done(void *cb_arg, int bdeverrno)
{
	struct vbdev_comp_delete_ctx *ctx = cb_arg;

	ctx->cb_rc = bdeverrno;

	if (ctx->orig_thread != spdk_get_thread()) {
		spdk_thread_send_msg(ctx->orig_thread, _vbdev_compress_delete_done, ctx);
	} else {
		_vbdev_compress_delete_done(ctx);
	}
}

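/* Public entry point for deleting a compress bdev by name (declared in
 * vbdev_compress.h): look the bdev up in the global list and unload its
 * reduce volume; teardown continues in delete_vol_unload_cb.
 */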
void
bdev_compress_delete(const char *name, spdk_delete_compress_complete cb_fn, void *cb_arg)
{
	struct vbdev_compress *comp_bdev = NULL;
	struct vbdev_comp_delete_ctx *ctx;

	TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) {
		if (strcmp(name, comp_bdev->comp_bdev.name) == 0) {
			break;
		}
	}

	if (comp_bdev == NULL) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Failed to allocate delete context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Save these for after the vol is destroyed. */
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->orig_thread = spdk_get_thread();

	comp_bdev->delete_ctx = ctx;

	/* Tell reducelib that we're done with this volume. */
	if (comp_bdev->orphaned == false) {
		spdk_reduce_vol_unload(comp_bdev->vol, delete_vol_unload_cb, comp_bdev);
	} else {
		delete_vol_unload_cb(comp_bdev, 0);
	}
}

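/* No-op unload completion: used when tearing down a freshly loaded volume
 * after claiming it fails, where there is nothing left to clean up here.
 */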
static void
_vbdev_reduce_load_unload_cb(void *ctx, int reduce_errno)
{
}

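/* Finish the load on the thread where the base bdev was opened. Three cases:
 * 0       - a reduce volume was found, claim it and register the compress bdev;
 * -ENOENT - no reduce metadata, claim the base bdev as "orphaned" so a volume
 *           can still be created on it later;
 * other   - the load failed, release everything.
 */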
static void
_vbdev_reduce_load_cb(void *ctx)
{
	struct vbdev_compress *comp_bdev = ctx;
	int rc;

	assert(comp_bdev->base_desc != NULL);

	/* Done with metadata operations */
	spdk_put_io_channel(comp_bdev->base_ch);

	if (comp_bdev->reduce_errno == 0) {
		rc = vbdev_compress_claim(comp_bdev);
		if (rc != 0) {
			spdk_reduce_vol_unload(comp_bdev->vol, _vbdev_reduce_load_unload_cb, NULL);
			goto err;
		}
	} else if (comp_bdev->reduce_errno == -ENOENT) {
		if (_set_compbdev_name(comp_bdev)) {
			goto err;
		}

		/* Save the thread where the base device is opened */
		comp_bdev->thread = spdk_get_thread();

		comp_bdev->comp_bdev.module = &compress_if;
		pthread_mutex_init(&comp_bdev->reduce_lock, NULL);
		rc = spdk_bdev_module_claim_bdev(comp_bdev->base_bdev, comp_bdev->base_desc,
						 comp_bdev->comp_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s, error %s\n", spdk_bdev_get_name(comp_bdev->base_bdev),
				    spdk_strerror(-rc));
			free(comp_bdev->comp_bdev.name);
			goto err;
		}

		comp_bdev->orphaned = true;
		TAILQ_INSERT_TAIL(&g_vbdev_comp, comp_bdev, link);
	} else {
		if (comp_bdev->reduce_errno != -EILSEQ) {
			SPDK_ERRLOG("for vol %s, error %s\n", spdk_bdev_get_name(comp_bdev->base_bdev),
				    spdk_strerror(-comp_bdev->reduce_errno));
		}
		goto err;
	}

	spdk_bdev_module_examine_done(&compress_if);
	return;

err:
	/* Close the underlying bdev on its same opened thread. */
	spdk_bdev_close(comp_bdev->base_desc);
	free(comp_bdev);
	spdk_bdev_module_examine_done(&compress_if);
}

/* Callback from reduce when the load is complete. We'll pass the vbdev_comp
 * struct used for initial metadata operations to claim where it will be
 * further filled out and added to the global list.
 */
static void
vbdev_reduce_load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	struct vbdev_compress *comp_bdev = cb_arg;

	if (reduce_errno == 0) {
		/* Update information following volume load. */
		comp_bdev->vol = vol;
		memcpy(&comp_bdev->params, spdk_reduce_vol_get_params(vol),
		       sizeof(struct spdk_reduce_vol_params));
		comp_bdev->comp_algo = comp_bdev->params.comp_algo;
		comp_bdev->comp_level = comp_bdev->params.comp_level;
	}

	comp_bdev->reduce_errno = reduce_errno;

	if (comp_bdev->thread && comp_bdev->thread != spdk_get_thread()) {
		spdk_thread_send_msg(comp_bdev->thread, _vbdev_reduce_load_cb, comp_bdev);
	} else {
		_vbdev_reduce_load_cb(comp_bdev);
	}
}

/* Examine_disk entry point: will do a metadata load to see if this is ours,
 * and if so will go ahead and claim it.
 */
static void
vbdev_compress_examine(struct spdk_bdev *bdev)
{
	struct spdk_bdev_desc *bdev_desc = NULL;
	struct vbdev_compress *comp_bdev;
	int rc;

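	/* Don't examine our own compress bdevs, to avoid stacking on them. */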
	if (strcmp(bdev->product_name, COMP_BDEV_NAME) == 0) {
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false,
				vbdev_compress_base_bdev_event_cb, NULL, &bdev_desc);
	if (rc) {
		SPDK_ERRLOG("could not open bdev %s, error %s\n", spdk_bdev_get_name(bdev),
			    spdk_strerror(-rc));
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	comp_bdev = _prepare_for_load_init(bdev_desc, 0, SPDK_ACCEL_COMP_ALGO_DEFLATE, 1);
	if (comp_bdev == NULL) {
		spdk_bdev_close(bdev_desc);
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	/* Save the thread where the base device is opened */
	comp_bdev->thread = spdk_get_thread();

	comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc);
	spdk_reduce_vol_load(&comp_bdev->backing_dev, vbdev_reduce_load_cb, comp_bdev);
}

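/* Define and register the "vbdev_compress" log flag (disabled by default);
 * the macro installs it via a constructor at program load.
 */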
SPDK_LOG_REGISTER_COMPONENT(vbdev_compress)