Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2017 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/blob_bdev.h"
10 : #include "spdk/blob.h"
11 : #include "spdk/thread.h"
12 : #include "spdk/log.h"
13 : #include "spdk/endian.h"
14 : #define __SPDK_BDEV_MODULE_ONLY
15 : #include "spdk/bdev_module.h"
16 :
/* Blobstore device backed by an SPDK bdev. bs_dev must be the first member
 * so a struct spdk_bs_dev pointer can be cast back to a blob_bdev. */
struct blob_bdev {
	struct spdk_bs_dev bs_dev;	/* Generic blobstore device interface; must be first */
	struct spdk_bdev *bdev;		/* Underlying bdev */
	struct spdk_bdev_desc *desc;	/* Open descriptor; set to NULL in bdev_blob_destroy() */
	bool write;			/* True when opened for read-write access */
	int32_t refs;			/* One for the device plus one per open channel; guarded by lock */
	struct spdk_spinlock lock;	/* Protects refs */
};
25 :
/* Saved parameters of an I/O that failed with -ENOMEM, waiting to be
 * resubmitted once the bdev has a free I/O slot. */
struct blob_resubmit {
	struct spdk_bdev_io_wait_entry bdev_io_wait;	/* Entry registered via spdk_bdev_queue_io_wait() */
	enum spdk_bdev_io_type io_type;	/* Which operation to retry */
	struct spdk_bs_dev *dev;
	struct spdk_io_channel *channel;
	void *payload;			/* Flat buffer, or iovec array when iovcnt > 0 */
	int iovcnt;			/* > 0 selects the vectored retry path */
	uint64_t lba;
	uint64_t src_lba;		/* Source LBA; used by copy only */
	uint32_t lba_count;
	struct spdk_bs_dev_cb_args *cb_args;
	struct spdk_blob_ext_io_opts *ext_io_opts;	/* Non-NULL only for *_ext submissions */
};
static void bdev_blob_resubmit(void *);
40 :
41 : static inline struct spdk_bdev_desc *
42 0 : __get_desc(struct spdk_bs_dev *dev)
43 : {
44 0 : return ((struct blob_bdev *)dev)->desc;
45 : }
46 :
47 : static inline struct spdk_bdev *
48 0 : __get_bdev(struct spdk_bs_dev *dev)
49 : {
50 0 : return ((struct blob_bdev *)dev)->bdev;
51 : }
52 :
53 : static void
54 0 : bdev_blob_io_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
55 : {
56 0 : struct spdk_bs_dev_cb_args *cb_args = arg;
57 : int bserrno;
58 :
59 0 : if (success) {
60 0 : bserrno = 0;
61 : } else {
62 0 : bserrno = -EIO;
63 : }
64 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, bserrno);
65 0 : spdk_bdev_free_io(bdev_io);
66 0 : }
67 :
/* Park an I/O that failed with -ENOMEM: allocate a retry context and register
 * it with the bdev layer so bdev_blob_resubmit() is invoked once an I/O slot
 * frees up. On allocation or registration failure, the blobstore callback is
 * completed with the error instead. */
static void
bdev_blob_queue_io(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
		   int iovcnt, uint64_t lba, uint64_t src_lba, uint32_t lba_count,
		   enum spdk_bdev_io_type io_type, struct spdk_bs_dev_cb_args *cb_args,
		   struct spdk_blob_ext_io_opts *ext_io_opts)
{
	int rc;
	struct spdk_bdev *bdev = __get_bdev(dev);
	struct blob_resubmit *ctx;

	ctx = calloc(1, sizeof(struct blob_resubmit));

	if (ctx == NULL) {
		SPDK_ERRLOG("Not enough memory to queue io\n");
		cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, -ENOMEM);
		return;
	}

	/* Snapshot every submission parameter; bdev_blob_resubmit() replays
	 * them through the original submission path. */
	ctx->io_type = io_type;
	ctx->dev = dev;
	ctx->channel = channel;
	ctx->payload = payload;
	ctx->iovcnt = iovcnt;
	ctx->lba = lba;
	ctx->src_lba = src_lba;
	ctx->lba_count = lba_count;
	ctx->cb_args = cb_args;
	ctx->bdev_io_wait.bdev = bdev;
	ctx->bdev_io_wait.cb_fn = bdev_blob_resubmit;
	ctx->bdev_io_wait.cb_arg = ctx;
	ctx->ext_io_opts = ext_io_opts;

	rc = spdk_bdev_queue_io_wait(bdev, channel, &ctx->bdev_io_wait);
	if (rc != 0) {
		/* Registration only fails on programming errors (e.g. entry
		 * already queued), hence the assert after reporting. */
		SPDK_ERRLOG("Queue io failed, rc=%d\n", rc);
		cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
		free(ctx);
		assert(false);
	}
}
108 :
109 : static void
110 0 : bdev_blob_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
111 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
112 : {
113 : int rc;
114 :
115 0 : rc = spdk_bdev_read_blocks(__get_desc(dev), channel, payload, lba,
116 : lba_count, bdev_blob_io_complete, cb_args);
117 0 : if (rc == -ENOMEM) {
118 0 : bdev_blob_queue_io(dev, channel, payload, 0, lba, 0,
119 : lba_count, SPDK_BDEV_IO_TYPE_READ, cb_args, NULL);
120 0 : } else if (rc != 0) {
121 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
122 : }
123 0 : }
124 :
125 : static void
126 0 : bdev_blob_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
127 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
128 : {
129 : int rc;
130 :
131 0 : rc = spdk_bdev_write_blocks(__get_desc(dev), channel, payload, lba,
132 : lba_count, bdev_blob_io_complete, cb_args);
133 0 : if (rc == -ENOMEM) {
134 0 : bdev_blob_queue_io(dev, channel, payload, 0, lba, 0,
135 : lba_count, SPDK_BDEV_IO_TYPE_WRITE, cb_args, NULL);
136 0 : } else if (rc != 0) {
137 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
138 : }
139 0 : }
140 :
141 : static void
142 0 : bdev_blob_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
143 : struct iovec *iov, int iovcnt,
144 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
145 : {
146 : int rc;
147 :
148 0 : rc = spdk_bdev_readv_blocks(__get_desc(dev), channel, iov, iovcnt, lba,
149 : lba_count, bdev_blob_io_complete, cb_args);
150 0 : if (rc == -ENOMEM) {
151 0 : bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0,
152 : lba_count, SPDK_BDEV_IO_TYPE_READ, cb_args, NULL);
153 0 : } else if (rc != 0) {
154 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
155 : }
156 0 : }
157 :
158 : static void
159 0 : bdev_blob_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
160 : struct iovec *iov, int iovcnt,
161 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
162 : {
163 : int rc;
164 :
165 0 : rc = spdk_bdev_writev_blocks(__get_desc(dev), channel, iov, iovcnt, lba,
166 : lba_count, bdev_blob_io_complete, cb_args);
167 0 : if (rc == -ENOMEM) {
168 0 : bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0,
169 : lba_count, SPDK_BDEV_IO_TYPE_WRITE, cb_args, NULL);
170 0 : } else if (rc != 0) {
171 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
172 : }
173 0 : }
174 :
175 : static inline void
176 0 : blob_ext_io_opts_to_bdev_opts(struct spdk_bdev_ext_io_opts *dst, struct spdk_blob_ext_io_opts *src)
177 : {
178 0 : memset(dst, 0, sizeof(*dst));
179 0 : dst->size = sizeof(*dst);
180 0 : dst->memory_domain = src->memory_domain;
181 0 : dst->memory_domain_ctx = src->memory_domain_ctx;
182 0 : }
183 :
184 : static void
185 0 : bdev_blob_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
186 : struct iovec *iov, int iovcnt,
187 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args,
188 : struct spdk_blob_ext_io_opts *io_opts)
189 : {
190 0 : struct spdk_bdev_ext_io_opts bdev_io_opts;
191 : int rc;
192 :
193 0 : blob_ext_io_opts_to_bdev_opts(&bdev_io_opts, io_opts);
194 0 : rc = spdk_bdev_readv_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count,
195 : bdev_blob_io_complete, cb_args, &bdev_io_opts);
196 0 : if (rc == -ENOMEM) {
197 0 : bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_READ, cb_args,
198 : io_opts);
199 0 : } else if (rc != 0) {
200 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
201 : }
202 0 : }
203 :
204 : static void
205 0 : bdev_blob_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
206 : struct iovec *iov, int iovcnt,
207 : uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args,
208 : struct spdk_blob_ext_io_opts *io_opts)
209 : {
210 0 : struct spdk_bdev_ext_io_opts bdev_io_opts;
211 : int rc;
212 :
213 0 : blob_ext_io_opts_to_bdev_opts(&bdev_io_opts, io_opts);
214 0 : rc = spdk_bdev_writev_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count,
215 : bdev_blob_io_complete, cb_args, &bdev_io_opts);
216 0 : if (rc == -ENOMEM) {
217 0 : bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_WRITE, cb_args,
218 : io_opts);
219 0 : } else if (rc != 0) {
220 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
221 : }
222 0 : }
223 :
224 : static void
225 0 : bdev_blob_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t lba,
226 : uint64_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
227 : {
228 : int rc;
229 :
230 0 : rc = spdk_bdev_write_zeroes_blocks(__get_desc(dev), channel, lba,
231 : lba_count, bdev_blob_io_complete, cb_args);
232 0 : if (rc == -ENOMEM) {
233 0 : bdev_blob_queue_io(dev, channel, NULL, 0, lba, 0,
234 : lba_count, SPDK_BDEV_IO_TYPE_WRITE_ZEROES, cb_args, NULL);
235 0 : } else if (rc != 0) {
236 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
237 : }
238 0 : }
239 :
/* Submit an unmap if the bdev supports it; otherwise complete immediately
 * with success, since blobstore treats unmap as advisory. */
static void
bdev_blob_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t lba,
		uint64_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	struct blob_bdev *blob_bdev = (struct blob_bdev *)dev;
	int rc;

	if (spdk_bdev_io_type_supported(blob_bdev->bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
		rc = spdk_bdev_unmap_blocks(__get_desc(dev), channel, lba, lba_count,
					    bdev_blob_io_complete, cb_args);
		if (rc == -ENOMEM) {
			/* Out of bdev I/O resources: park the request for retry. */
			bdev_blob_queue_io(dev, channel, NULL, 0, lba, 0,
					   lba_count, SPDK_BDEV_IO_TYPE_UNMAP, cb_args, NULL);
		} else if (rc != 0) {
			cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
		}
	} else {
		/*
		 * If the device doesn't support unmap, immediately complete
		 * the request. Blobstore does not rely on unmap zeroing
		 * data.
		 */
		cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
	}
}
265 :
266 : static void
267 0 : bdev_blob_copy(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
268 : uint64_t dst_lba, uint64_t src_lba, uint64_t lba_count,
269 : struct spdk_bs_dev_cb_args *cb_args)
270 : {
271 : int rc;
272 :
273 0 : rc = spdk_bdev_copy_blocks(__get_desc(dev), channel,
274 : dst_lba, src_lba, lba_count,
275 : bdev_blob_io_complete, cb_args);
276 0 : if (rc == -ENOMEM) {
277 0 : bdev_blob_queue_io(dev, channel, NULL, 0, dst_lba, src_lba,
278 : lba_count, SPDK_BDEV_IO_TYPE_COPY, cb_args, NULL);
279 0 : } else if (rc != 0) {
280 0 : cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
281 : }
282 0 : }
283 :
284 : static void
285 0 : bdev_blob_resubmit(void *arg)
286 : {
287 0 : struct blob_resubmit *ctx = (struct blob_resubmit *) arg;
288 :
289 0 : switch (ctx->io_type) {
290 0 : case SPDK_BDEV_IO_TYPE_READ:
291 0 : if (ctx->iovcnt > 0) {
292 0 : bdev_blob_readv_ext(ctx->dev, ctx->channel, (struct iovec *) ctx->payload, ctx->iovcnt,
293 : ctx->lba, ctx->lba_count, ctx->cb_args, ctx->ext_io_opts);
294 : } else {
295 0 : bdev_blob_read(ctx->dev, ctx->channel, ctx->payload,
296 : ctx->lba, ctx->lba_count, ctx->cb_args);
297 : }
298 0 : break;
299 0 : case SPDK_BDEV_IO_TYPE_WRITE:
300 0 : if (ctx->iovcnt > 0) {
301 0 : bdev_blob_writev_ext(ctx->dev, ctx->channel, (struct iovec *) ctx->payload, ctx->iovcnt,
302 : ctx->lba, ctx->lba_count, ctx->cb_args, ctx->ext_io_opts);
303 : } else {
304 0 : bdev_blob_write(ctx->dev, ctx->channel, ctx->payload,
305 : ctx->lba, ctx->lba_count, ctx->cb_args);
306 : }
307 0 : break;
308 0 : case SPDK_BDEV_IO_TYPE_UNMAP:
309 0 : bdev_blob_unmap(ctx->dev, ctx->channel,
310 0 : ctx->lba, ctx->lba_count, ctx->cb_args);
311 0 : break;
312 0 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
313 0 : bdev_blob_write_zeroes(ctx->dev, ctx->channel,
314 0 : ctx->lba, ctx->lba_count, ctx->cb_args);
315 0 : break;
316 0 : case SPDK_BDEV_IO_TYPE_COPY:
317 0 : bdev_blob_copy(ctx->dev, ctx->channel,
318 0 : ctx->lba, ctx->src_lba, ctx->lba_count, ctx->cb_args);
319 0 : break;
320 0 : default:
321 0 : SPDK_ERRLOG("Unsupported io type %d\n", ctx->io_type);
322 0 : assert(false);
323 : break;
324 : }
325 0 : free(ctx);
326 0 : }
327 :
328 : int
329 3 : spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
330 : {
331 3 : struct blob_bdev *blob_bdev = (struct blob_bdev *)bs_dev;
332 3 : struct spdk_bdev_desc *desc = blob_bdev->desc;
333 : enum spdk_bdev_claim_type claim_type;
334 : int rc;
335 :
336 3 : claim_type = blob_bdev->write ? SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE :
337 : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
338 3 : rc = spdk_bdev_module_claim_bdev_desc(desc, claim_type, NULL, module);
339 3 : if (rc != 0) {
340 1 : SPDK_ERRLOG("could not claim bs dev\n");
341 1 : return rc;
342 : }
343 :
344 2 : return rc;
345 : }
346 :
/* spdk_bs_dev create_channel callback: get a per-thread bdev I/O channel
 * and, on success, take a reference on the blob_bdev so the wrapper stays
 * alive until the channel is destroyed. Returns NULL on failure. */
static struct spdk_io_channel *
bdev_blob_create_channel(struct spdk_bs_dev *dev)
{
	struct blob_bdev *blob_bdev = (struct blob_bdev *)dev;
	struct spdk_io_channel *ch;

	ch = spdk_bdev_get_io_channel(blob_bdev->desc);
	if (ch != NULL) {
		spdk_spin_lock(&blob_bdev->lock);
		blob_bdev->refs++;
		spdk_spin_unlock(&blob_bdev->lock);
	}

	return ch;
}
362 :
/* Release the blob_bdev wrapper. Only callable once the last reference
 * (device plus all channels) has been dropped. */
static void
bdev_blob_free(struct blob_bdev *blob_bdev)
{
	assert(blob_bdev->refs == 0);

	spdk_spin_destroy(&blob_bdev->lock);
	free(blob_bdev);
}
371 :
/* spdk_bs_dev destroy_channel callback: release the channel, drop one
 * reference, and free the wrapper if this was the last reference (possible
 * when the device was destroyed while channels remained open). */
static void
bdev_blob_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
	struct blob_bdev *blob_bdev = (struct blob_bdev *)dev;
	int32_t refs;

	spdk_spin_lock(&blob_bdev->lock);

	assert(blob_bdev->refs > 0);
	blob_bdev->refs--;
	refs = blob_bdev->refs;

	spdk_spin_unlock(&blob_bdev->lock);

	spdk_put_io_channel(channel);

	/*
	 * If the value of blob_bdev->refs taken while holding blob_bdev->lock is zero, the blob and
	 * this channel have been destroyed. This means that dev->destroy() has been called and it
	 * would be an error (akin to use after free) if dev is dereferenced after destroying it.
	 * Thus, there should be no race with bdev_blob_create_channel().
	 *
	 * Because the value of blob_bdev->refs was taken while holding the lock here and the same
	 * is done in bdev_blob_destroy(), there is no race with bdev_blob_destroy().
	 */
	if (refs == 0) {
		bdev_blob_free(blob_bdev);
	}
}
401 :
/* spdk_bs_dev destroy callback: close the bdev descriptor, drop the
 * device's own reference, and free the wrapper unless channels are still
 * open (the last destroy_channel will free it then). */
static void
bdev_blob_destroy(struct spdk_bs_dev *bs_dev)
{
	struct blob_bdev *blob_bdev = (struct blob_bdev *)bs_dev;
	struct spdk_bdev_desc *desc;
	int32_t refs;

	spdk_spin_lock(&blob_bdev->lock);

	/* Take ownership of the descriptor under the lock so it is closed
	 * exactly once. */
	desc = blob_bdev->desc;
	blob_bdev->desc = NULL;
	blob_bdev->refs--;
	refs = blob_bdev->refs;

	spdk_spin_unlock(&blob_bdev->lock);

	spdk_bdev_close(desc);

	/*
	 * If the value of blob_bdev->refs taken while holding blob_bdev->lock is zero,
	 * bs_dev->destroy() has been called and all the channels have been destroyed. It would be
	 * an error (akin to use after free) if bs_dev is dereferenced after destroying it. Thus,
	 * there should be no race with bdev_blob_create_channel().
	 *
	 * Because the value of blob_bdev->refs was taken while holding the lock here and the same
	 * is done in bdev_blob_destroy_channel(), there is no race with
	 * bdev_blob_destroy_channel().
	 */
	if (refs == 0) {
		bdev_blob_free(blob_bdev);
	}
}
434 :
435 : static struct spdk_bdev *
436 0 : bdev_blob_get_base_bdev(struct spdk_bs_dev *bs_dev)
437 : {
438 0 : return __get_bdev(bs_dev);
439 : }
440 :
/* spdk_bs_dev is_zeroes callback: a bdev makes no guarantee that unwritten
 * blocks read back as zeroes, so always report false. */
static bool
bdev_blob_is_zeroes(struct spdk_bs_dev *dev, uint64_t lba, uint64_t lba_count)
{
	return false;
}
446 :
/* spdk_bs_dev is_range_valid callback: report whether [lba, lba + lba_count)
 * lies entirely within this bs_dev. A range that merely starts out of bounds
 * is an expected condition and returns false quietly; a range that starts in
 * bounds but runs past the end indicates a CoW bug and asserts.
 *
 * Fixes: use PRIu64 instead of %lu for uint64_t (wrong on 32-bit/LLP64
 * targets), compare without computing lba + lba_count (which could wrap),
 * and call spdk_bdev_get_num_blocks() once. */
static bool
bdev_blob_is_range_valid(struct spdk_bs_dev *dev, uint64_t lba, uint64_t lba_count)
{
	uint64_t num_blocks = spdk_bdev_get_num_blocks(__get_bdev(dev));

	/* The lba requested should be within the bounds of this bs_dev. */
	if (lba >= num_blocks) {
		return false;
	} else if (lba_count > num_blocks - lba) {
		/* bdevs used for esnaps must currently be an exact multiple of the
		 * blobstore cluster size (see spdk_lvol_create_esnap_clone()), but if that
		 * ever changes this code here needs to be updated to account for it. */
		SPDK_ERRLOG("Entire range must be within the bs_dev bounds for CoW.\n"
			    "lba(lba_count): %" PRIu64 "(%" PRIu64 "), num_blks: %" PRIu64 "\n",
			    lba, lba_count, num_blocks);
		assert(false);
		return false;
	}

	return true;
}
467 :
/* spdk_bs_dev translate_lba callback: a bdev-backed bs_dev maps LBAs 1:1,
 * so translation always succeeds with *base_lba == lba. */
static bool
bdev_blob_translate_lba(struct spdk_bs_dev *dev, uint64_t lba, uint64_t *base_lba)
{
	*base_lba = lba;
	return true;
}
474 :
/* Initialize a blob_bdev from an open descriptor: cache the bdev and desc,
 * copy the device geometry, and fill in the spdk_bs_dev function table. */
static void
blob_bdev_init(struct blob_bdev *b, struct spdk_bdev_desc *desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_desc_get_bdev(desc);
	assert(bdev != NULL);

	b->bdev = bdev;
	b->desc = desc;
	b->bs_dev.blockcnt = spdk_bdev_get_num_blocks(bdev);
	b->bs_dev.blocklen = spdk_bdev_get_block_size(bdev);
	b->bs_dev.create_channel = bdev_blob_create_channel;
	b->bs_dev.destroy_channel = bdev_blob_destroy_channel;
	b->bs_dev.destroy = bdev_blob_destroy;
	b->bs_dev.read = bdev_blob_read;
	b->bs_dev.write = bdev_blob_write;
	b->bs_dev.readv = bdev_blob_readv;
	b->bs_dev.writev = bdev_blob_writev;
	b->bs_dev.readv_ext = bdev_blob_readv_ext;
	b->bs_dev.writev_ext = bdev_blob_writev_ext;
	b->bs_dev.write_zeroes = bdev_blob_write_zeroes;
	b->bs_dev.unmap = bdev_blob_unmap;
	/* Copy offload is optional; advertise it only when the bdev supports it
	 * (b was calloc'd, so the member is otherwise NULL). */
	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
		b->bs_dev.copy = bdev_blob_copy;
	}
	b->bs_dev.get_base_bdev = bdev_blob_get_base_bdev;
	b->bs_dev.is_zeroes = bdev_blob_is_zeroes;
	b->bs_dev.is_range_valid = bdev_blob_is_range_valid;
	b->bs_dev.translate_lba = bdev_blob_translate_lba;
}
506 :
/* Refresh bs_dev->blockcnt after the underlying bdev was resized. The block
 * size must be unchanged (asserted). */
void
spdk_bdev_update_bs_blockcnt(struct spdk_bs_dev *bs_dev)
{
	struct blob_bdev *blob_bdev = (struct blob_bdev *)bs_dev;

	assert(bs_dev->blocklen == spdk_bdev_get_block_size(blob_bdev->bdev));
	bs_dev->blockcnt = spdk_bdev_get_num_blocks(blob_bdev->bdev);
}
515 :
/* Open the named bdev and wrap it in a newly allocated spdk_bs_dev for
 * blobstore use. Must be called from an SPDK thread. On success, *bs_dev is
 * set and owns the open descriptor; release it with bs_dev->destroy(), which
 * closes the bdev and frees the wrapper.
 * Returns 0, -EINVAL for a bad opts_size, -ENOMEM on allocation failure, or
 * the error from spdk_bdev_open_ext().
 * Note: opts is currently only size-validated; no option fields are read. */
int
spdk_bdev_create_bs_dev(const char *bdev_name, bool write,
			struct spdk_bdev_bs_dev_opts *opts, size_t opts_size,
			spdk_bdev_event_cb_t event_cb, void *event_ctx,
			struct spdk_bs_dev **bs_dev)
{
	struct blob_bdev *b;
	struct spdk_bdev_desc *desc;
	int rc;

	assert(spdk_get_thread() != NULL);

	if (opts != NULL && opts_size != sizeof(*opts)) {
		SPDK_ERRLOG("bdev name '%s': unsupported options\n", bdev_name);
		return -EINVAL;
	}

	b = calloc(1, sizeof(*b));

	if (b == NULL) {
		SPDK_ERRLOG("could not allocate blob_bdev\n");
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(bdev_name, write, event_cb, event_ctx, &desc);
	if (rc != 0) {
		free(b);
		return rc;
	}

	blob_bdev_init(b, desc);

	*bs_dev = &b->bs_dev;
	b->write = write;
	/* Initial reference belongs to the device; each channel adds one. */
	b->refs = 1;
	spdk_spin_init(&b->lock);

	return 0;
}
555 :
/* Convenience wrapper: open bdev_name read-write with default options. */
int
spdk_bdev_create_bs_dev_ext(const char *bdev_name, spdk_bdev_event_cb_t event_cb,
			    void *event_ctx, struct spdk_bs_dev **bs_dev)
{
	return spdk_bdev_create_bs_dev(bdev_name, true, NULL, 0, event_cb, event_ctx, bs_dev);
}
|