Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation.
3 : * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4 : * All rights reserved.
5 : */
6 :
7 : /*
8 : * Common code for partition-like virtual bdevs.
9 : */
10 :
11 : #include "spdk/bdev.h"
12 : #include "spdk/likely.h"
13 : #include "spdk/log.h"
14 : #include "spdk/string.h"
15 : #include "spdk/thread.h"
16 :
17 : #include "spdk/bdev_module.h"
18 :
19 : /* This namespace UUID was generated with the uuid_generate() function. */
20 : #define BDEV_PART_NAMESPACE_UUID "976b899e-3e1e-4d71-ab69-c2b08e9df8b8"
21 :
22 : struct spdk_bdev_part_base {
23 : struct spdk_bdev *bdev;
24 : struct spdk_bdev_desc *desc;
25 : uint32_t ref;
26 : uint32_t channel_size;
27 : spdk_bdev_part_base_free_fn base_free_fn;
28 : void *ctx;
29 : bool claimed;
30 : struct spdk_bdev_module *module;
31 : struct spdk_bdev_fn_table *fn_table;
32 : struct bdev_part_tailq *tailq;
33 : spdk_io_channel_create_cb ch_create_cb;
34 : spdk_io_channel_destroy_cb ch_destroy_cb;
35 : spdk_bdev_remove_cb_t remove_cb;
36 : struct spdk_thread *thread;
37 : };
38 :
39 : struct spdk_bdev *
40 0 : spdk_bdev_part_base_get_bdev(struct spdk_bdev_part_base *part_base)
41 : {
42 0 : return part_base->bdev;
43 : }
44 :
45 : struct spdk_bdev_desc *
46 0 : spdk_bdev_part_base_get_desc(struct spdk_bdev_part_base *part_base)
47 : {
48 0 : return part_base->desc;
49 : }
50 :
51 : struct bdev_part_tailq *
52 0 : spdk_bdev_part_base_get_tailq(struct spdk_bdev_part_base *part_base)
53 : {
54 0 : return part_base->tailq;
55 : }
56 :
57 : void *
58 0 : spdk_bdev_part_base_get_ctx(struct spdk_bdev_part_base *part_base)
59 : {
60 0 : return part_base->ctx;
61 : }
62 :
63 : const char *
64 0 : spdk_bdev_part_base_get_bdev_name(struct spdk_bdev_part_base *part_base)
65 : {
66 0 : return part_base->bdev->name;
67 : }
68 :
69 : static void
70 0 : bdev_part_base_free(void *ctx)
71 : {
72 0 : struct spdk_bdev_desc *desc = ctx;
73 :
74 0 : spdk_bdev_close(desc);
75 0 : }
76 :
77 : void
78 4 : spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
79 : {
80 4 : if (base->desc) {
81 : /* Close the underlying bdev on the same thread that opened it. */
82 4 : if (base->thread && base->thread != spdk_get_thread()) {
83 0 : spdk_thread_send_msg(base->thread, bdev_part_base_free, base->desc);
84 : } else {
85 4 : spdk_bdev_close(base->desc);
86 : }
87 : }
88 :
89 4 : if (base->base_free_fn != NULL) {
90 0 : base->base_free_fn(base->ctx);
91 : }
92 :
93 4 : free(base);
94 4 : }
95 :
96 : static void
97 2 : bdev_part_free_cb(void *io_device)
98 : {
99 2 : struct spdk_bdev_part *part = io_device;
100 : struct spdk_bdev_part_base *base;
101 :
102 2 : assert(part);
103 2 : assert(part->internal.base);
104 :
105 2 : base = part->internal.base;
106 :
107 2 : TAILQ_REMOVE(base->tailq, part, tailq);
108 :
109 2 : if (--base->ref == 0) {
110 2 : spdk_bdev_module_release_bdev(base->bdev);
111 2 : spdk_bdev_part_base_free(base);
112 : }
113 :
114 2 : spdk_bdev_destruct_done(&part->internal.bdev, 0);
115 2 : free(part->internal.bdev.name);
116 2 : free(part->internal.bdev.product_name);
117 2 : free(part);
118 2 : }
119 :
120 : int
121 2 : spdk_bdev_part_free(struct spdk_bdev_part *part)
122 : {
123 2 : spdk_io_device_unregister(part, bdev_part_free_cb);
124 :
125 : /* Return 1 to indicate that this is an asynchronous operation that isn't complete
126 : * until spdk_bdev_destruct_done is called */
127 2 : return 1;
128 : }
129 :
130 : void
131 2 : spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev_part_tailq *tailq)
132 : {
133 : struct spdk_bdev_part *part, *tmp;
134 :
135 5 : TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
136 3 : if (part->internal.base == part_base) {
137 3 : spdk_bdev_unregister(&part->internal.bdev, NULL, NULL);
138 : }
139 : }
140 2 : }
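
/*
 * Illustrative sketch (hypothetical name): the remove callback registered through
 * spdk_bdev_part_base_construct_ext() is invoked with the part base itself, so a
 * module can unregister every partition that sits on the removed base bdev.
 */
static void
example_base_hotremove_cb(void *_part_base)
{
	struct spdk_bdev_part_base *part_base = _part_base;

	spdk_bdev_part_base_hotremove(part_base, spdk_bdev_part_base_get_tailq(part_base));
}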
141 :
142 : static bool
143 15 : bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
144 : {
145 15 : struct spdk_bdev_part *part = _part;
146 :
147 : /* We can't decode/modify passthrough NVMe commands, so don't report
148 : * that a partition supports these io types, even if the underlying
149 : * bdev does.
150 : */
151 15 : switch (io_type) {
152 0 : case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
153 : case SPDK_BDEV_IO_TYPE_NVME_IO:
154 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
155 0 : return false;
156 15 : default:
157 15 : break;
158 : }
159 :
160 15 : return part->internal.base->bdev->fn_table->io_type_supported(part->internal.base->bdev->ctxt,
161 : io_type);
162 : }
163 :
164 : static struct spdk_io_channel *
165 1 : bdev_part_get_io_channel(void *_part)
166 : {
167 1 : struct spdk_bdev_part *part = _part;
168 :
169 1 : return spdk_get_io_channel(part);
170 : }
171 :
172 : struct spdk_bdev *
173 0 : spdk_bdev_part_get_bdev(struct spdk_bdev_part *part)
174 : {
175 0 : return &part->internal.bdev;
176 : }
177 :
178 : struct spdk_bdev_part_base *
179 0 : spdk_bdev_part_get_base(struct spdk_bdev_part *part)
180 : {
181 0 : return part->internal.base;
182 : }
183 :
184 : struct spdk_bdev *
185 0 : spdk_bdev_part_get_base_bdev(struct spdk_bdev_part *part)
186 : {
187 0 : return part->internal.base->bdev;
188 : }
189 :
190 : uint64_t
191 0 : spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
192 : {
193 0 : return part->internal.offset_blocks;
194 : }
195 :
196 : static int
197 0 : bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
198 : uint32_t remapped_offset)
199 : {
200 0 : struct spdk_bdev *bdev = bdev_io->bdev;
201 0 : struct spdk_dif_ctx dif_ctx;
202 0 : struct spdk_dif_error err_blk = {};
203 : int rc;
204 0 : struct spdk_dif_ctx_init_ext_opts dif_opts;
205 :
206 0 : if (spdk_likely(!(bdev_io->u.bdev.dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK))) {
207 0 : return 0;
208 : }
209 :
210 0 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
211 0 : dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
212 0 : rc = spdk_dif_ctx_init(&dif_ctx,
213 0 : bdev->blocklen, bdev->md_len, bdev->md_interleave,
214 0 : bdev->dif_is_head_of_md, bdev->dif_type, bdev_io->u.bdev.dif_check_flags,
215 : offset, 0, 0, 0, 0, &dif_opts);
216 0 : if (rc != 0) {
217 0 : SPDK_ERRLOG("Initialization of DIF context failed\n");
218 0 : return rc;
219 : }
220 :
221 0 : spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);
222 :
223 0 : if (bdev->md_interleave) {
224 0 : rc = spdk_dif_remap_ref_tag(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
225 0 : bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
226 : } else {
227 0 : struct iovec md_iov = {
228 0 : .iov_base = bdev_io->u.bdev.md_buf,
229 0 : .iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
230 : };
231 :
232 0 : rc = spdk_dix_remap_ref_tag(&md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
233 : }
234 :
235 0 : if (rc != 0) {
236 0 : SPDK_ERRLOG("Remapping reference tag failed. type=%d, offset=%" PRIu32 "\n",
237 : err_blk.err_type, err_blk.err_offset);
238 : }
239 :
240 0 : return rc;
241 : }
242 :
243 : static void
244 0 : bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
245 : {
246 0 : struct spdk_bdev_io *part_io = cb_arg;
247 : uint32_t offset, remapped_offset;
248 : int rc;
249 :
250 0 : switch (bdev_io->type) {
251 0 : case SPDK_BDEV_IO_TYPE_READ:
252 0 : if (success) {
253 0 : offset = bdev_io->u.bdev.offset_blocks;
254 0 : remapped_offset = part_io->u.bdev.offset_blocks;
255 :
256 0 : rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
257 0 : if (rc != 0) {
258 0 : success = false;
259 : }
260 : }
261 0 : break;
262 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
263 0 : spdk_bdev_io_set_buf(part_io, bdev_io->u.bdev.iovs[0].iov_base,
264 0 : bdev_io->u.bdev.iovs[0].iov_len);
265 0 : break;
266 0 : default:
267 0 : break;
268 : }
269 :
270 0 : if (part_io->internal.f.split) {
271 0 : part_io->internal.split.stored_user_cb(part_io, success, NULL);
272 : } else {
273 0 : spdk_bdev_io_complete_base_io_status(part_io, bdev_io);
274 : }
275 :
276 0 : spdk_bdev_free_io(bdev_io);
277 0 : }
278 :
279 : static inline void
280 0 : bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
281 : {
282 0 : memset(opts, 0, sizeof(*opts));
283 0 : opts->size = sizeof(*opts);
284 0 : opts->memory_domain = bdev_io->u.bdev.memory_domain;
285 0 : opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
286 0 : opts->metadata = bdev_io->u.bdev.md_buf;
287 0 : opts->dif_check_flags_exclude_mask = ~bdev_io->u.bdev.dif_check_flags;
288 0 : }
289 :
290 : int
291 0 : spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
292 : spdk_bdev_io_completion_cb cb)
293 : {
294 0 : struct spdk_bdev_part *part = ch->part;
295 0 : struct spdk_io_channel *base_ch = ch->base_ch;
296 0 : struct spdk_bdev_desc *base_desc = part->internal.base->desc;
297 0 : struct spdk_bdev_ext_io_opts io_opts;
298 : uint64_t offset, remapped_offset, remapped_src_offset;
299 0 : int rc = 0;
300 :
301 0 : if (cb != NULL) {
302 0 : bdev_io->internal.f.split = true;
303 0 : bdev_io->internal.split.stored_user_cb = cb;
304 : }
305 :
306 0 : offset = bdev_io->u.bdev.offset_blocks;
307 0 : remapped_offset = offset + part->internal.offset_blocks;
308 :
309 : /* Modify the I/O to adjust for the offset within the base bdev. */
310 0 : switch (bdev_io->type) {
311 0 : case SPDK_BDEV_IO_TYPE_READ:
312 0 : bdev_part_init_ext_io_opts(bdev_io, &io_opts);
313 0 : rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
314 : bdev_io->u.bdev.iovcnt, remapped_offset,
315 : bdev_io->u.bdev.num_blocks,
316 : bdev_part_complete_io, bdev_io, &io_opts);
317 0 : break;
318 0 : case SPDK_BDEV_IO_TYPE_WRITE:
319 0 : rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
320 0 : if (rc != 0) {
321 0 : return SPDK_BDEV_IO_STATUS_FAILED;
322 : }
323 0 : bdev_part_init_ext_io_opts(bdev_io, &io_opts);
324 0 : rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
325 : bdev_io->u.bdev.iovcnt, remapped_offset,
326 : bdev_io->u.bdev.num_blocks,
327 : bdev_part_complete_io, bdev_io, &io_opts);
328 0 : break;
329 0 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
330 0 : rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
331 : bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
332 : bdev_io);
333 0 : break;
334 0 : case SPDK_BDEV_IO_TYPE_UNMAP:
335 0 : rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
336 : bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
337 : bdev_io);
338 0 : break;
339 0 : case SPDK_BDEV_IO_TYPE_FLUSH:
340 0 : rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
341 : bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
342 : bdev_io);
343 0 : break;
344 0 : case SPDK_BDEV_IO_TYPE_RESET:
345 0 : rc = spdk_bdev_reset(base_desc, base_ch,
346 : bdev_part_complete_io, bdev_io);
347 0 : break;
348 0 : case SPDK_BDEV_IO_TYPE_ABORT:
349 0 : rc = spdk_bdev_abort(base_desc, base_ch, bdev_io->u.abort.bio_to_abort,
350 : bdev_part_complete_io, bdev_io);
351 0 : break;
352 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
353 0 : rc = spdk_bdev_zcopy_start(base_desc, base_ch, NULL, 0, remapped_offset,
354 0 : bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
355 : bdev_part_complete_io, bdev_io);
356 0 : break;
357 0 : case SPDK_BDEV_IO_TYPE_COMPARE:
358 0 : if (!bdev_io->u.bdev.md_buf) {
359 0 : rc = spdk_bdev_comparev_blocks(base_desc, base_ch,
360 : bdev_io->u.bdev.iovs,
361 : bdev_io->u.bdev.iovcnt,
362 : remapped_offset,
363 : bdev_io->u.bdev.num_blocks,
364 : bdev_part_complete_io, bdev_io);
365 : } else {
366 0 : rc = spdk_bdev_comparev_blocks_with_md(base_desc, base_ch,
367 : bdev_io->u.bdev.iovs,
368 : bdev_io->u.bdev.iovcnt,
369 : bdev_io->u.bdev.md_buf,
370 : remapped_offset,
371 : bdev_io->u.bdev.num_blocks,
372 : bdev_part_complete_io, bdev_io);
373 : }
374 0 : break;
375 0 : case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
376 0 : rc = spdk_bdev_comparev_and_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
377 : bdev_io->u.bdev.iovcnt,
378 : bdev_io->u.bdev.fused_iovs,
379 : bdev_io->u.bdev.fused_iovcnt,
380 : remapped_offset,
381 : bdev_io->u.bdev.num_blocks,
382 : bdev_part_complete_io, bdev_io);
383 0 : break;
384 0 : case SPDK_BDEV_IO_TYPE_COPY:
385 0 : remapped_src_offset = bdev_io->u.bdev.copy.src_offset_blocks + part->internal.offset_blocks;
386 0 : rc = spdk_bdev_copy_blocks(base_desc, base_ch, remapped_offset, remapped_src_offset,
387 : bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
388 : bdev_io);
389 0 : break;
390 0 : default:
391 0 : SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
392 0 : return SPDK_BDEV_IO_STATUS_FAILED;
393 : }
394 :
395 0 : return rc;
396 : }
397 :
398 : int
399 0 : spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
400 : {
401 0 : return spdk_bdev_part_submit_request_ext(ch, bdev_io, NULL);
402 : }
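
/*
 * Illustrative sketch (hypothetical names): a module's fn_table submit_request
 * callback usually just unwraps its per-channel context and forwards the I/O here.
 * Error handling is simplified; a real module would typically retry -ENOMEM via
 * spdk_bdev_queue_io_wait() instead of failing the I/O outright.
 */
static void
example_part_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev_part_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (spdk_bdev_part_submit_request(ch, bdev_io) != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}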
403 :
404 : static int
405 1 : bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
406 : {
407 1 : struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
408 1 : struct spdk_bdev_part_channel *ch = ctx_buf;
409 :
410 1 : ch->part = part;
411 1 : ch->base_ch = spdk_bdev_get_io_channel(part->internal.base->desc);
412 1 : if (ch->base_ch == NULL) {
413 0 : return -1;
414 : }
415 :
416 1 : if (part->internal.base->ch_create_cb) {
417 0 : return part->internal.base->ch_create_cb(io_device, ctx_buf);
418 : } else {
419 1 : return 0;
420 : }
421 : }
422 :
423 : static void
424 1 : bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
425 : {
426 1 : struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
427 1 : struct spdk_bdev_part_channel *ch = ctx_buf;
428 :
429 1 : if (part->internal.base->ch_destroy_cb) {
430 0 : part->internal.base->ch_destroy_cb(io_device, ctx_buf);
431 : }
432 1 : spdk_put_io_channel(ch->base_ch);
433 1 : }
434 :
435 : static void
436 0 : bdev_part_base_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
437 : void *event_ctx)
438 : {
439 0 : struct spdk_bdev_part_base *base = event_ctx;
440 :
441 0 : switch (type) {
442 0 : case SPDK_BDEV_EVENT_REMOVE:
443 0 : base->remove_cb(base);
444 0 : break;
445 0 : default:
446 0 : SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
447 0 : break;
448 : }
449 0 : }
450 :
451 : int
452 4 : spdk_bdev_part_base_construct_ext(const char *bdev_name,
453 : spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module *module,
454 : struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
455 : spdk_bdev_part_base_free_fn free_fn, void *ctx,
456 : uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
457 : spdk_io_channel_destroy_cb ch_destroy_cb,
458 : struct spdk_bdev_part_base **_base)
459 : {
460 : int rc;
461 : struct spdk_bdev_part_base *base;
462 :
463 4 : if (_base == NULL) {
464 0 : return -EINVAL;
465 : }
466 :
467 4 : base = calloc(1, sizeof(*base));
468 4 : if (!base) {
469 0 : SPDK_ERRLOG("Memory allocation failure\n");
470 0 : return -ENOMEM;
471 : }
472 4 : fn_table->get_io_channel = bdev_part_get_io_channel;
473 4 : fn_table->io_type_supported = bdev_part_io_type_supported;
474 :
475 4 : base->desc = NULL;
476 4 : base->ref = 0;
477 4 : base->module = module;
478 4 : base->fn_table = fn_table;
479 4 : base->tailq = tailq;
480 4 : base->base_free_fn = free_fn;
481 4 : base->ctx = ctx;
482 4 : base->claimed = false;
483 4 : base->channel_size = channel_size;
484 4 : base->ch_create_cb = ch_create_cb;
485 4 : base->ch_destroy_cb = ch_destroy_cb;
486 4 : base->remove_cb = remove_cb;
487 :
488 4 : rc = spdk_bdev_open_ext(bdev_name, false, bdev_part_base_event_cb, base, &base->desc);
489 4 : if (rc) {
490 0 : if (rc == -ENODEV) {
491 0 : free(base);
492 : } else {
493 0 : SPDK_ERRLOG("could not open bdev %s: %s\n", bdev_name, spdk_strerror(-rc));
494 0 : spdk_bdev_part_base_free(base);
495 : }
496 0 : return rc;
497 : }
498 :
499 4 : base->bdev = spdk_bdev_desc_get_bdev(base->desc);
500 :
501 : /* Save the thread on which the base bdev was opened. */
502 4 : base->thread = spdk_get_thread();
503 :
504 4 : *_base = base;
505 :
506 4 : return 0;
507 : }
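
/*
 * Illustrative sketch (hypothetical names): opening a base bdev for partitioning.
 * The module provides its own bdev module, fn_table (destruct, submit_request, ...)
 * and partition tailq; get_io_channel and io_type_supported are filled in by
 * spdk_bdev_part_base_construct_ext() itself, and example_base_hotremove_cb is the
 * remove callback sketched after spdk_bdev_part_base_hotremove() above.
 */
static SPDK_BDEV_PART_TAILQ g_example_parts = TAILQ_HEAD_INITIALIZER(g_example_parts);

static int
example_part_base_create(const char *base_bdev_name, struct spdk_bdev_module *module,
			 struct spdk_bdev_fn_table *fn_table,
			 struct spdk_bdev_part_base **out_base)
{
	return spdk_bdev_part_base_construct_ext(base_bdev_name, example_base_hotremove_cb,
						 module, fn_table, &g_example_parts,
						 NULL, NULL,
						 sizeof(struct spdk_bdev_part_channel),
						 NULL, NULL, out_base);
}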
508 :
509 : void
510 6 : spdk_bdev_part_construct_opts_init(struct spdk_bdev_part_construct_opts *opts, uint64_t size)
511 : {
512 6 : if (opts == NULL) {
513 0 : SPDK_ERRLOG("opts should not be NULL\n");
514 0 : assert(opts != NULL);
515 0 : return;
516 : }
517 6 : if (size == 0) {
518 0 : SPDK_ERRLOG("size should not be zero\n");
519 0 : assert(size != 0);
520 0 : return;
521 : }
522 :
523 6 : memset(opts, 0, size);
524 6 : opts->opts_size = size;
525 : }
526 :
527 : static void
528 1 : part_construct_opts_copy(const struct spdk_bdev_part_construct_opts *src,
529 : struct spdk_bdev_part_construct_opts *dst)
530 : {
531 1 : if (src->opts_size == 0) {
532 0 : SPDK_ERRLOG("size should not be zero\n");
533 0 : assert(false);
534 : }
535 :
536 1 : memset(dst, 0, sizeof(*dst));
537 1 : dst->opts_size = src->opts_size;
538 :
539 : #define FIELD_OK(field) \
540 : offsetof(struct spdk_bdev_part_construct_opts, field) + sizeof(src->field) <= src->opts_size
541 :
542 : #define SET_FIELD(field) \
543 : if (FIELD_OK(field)) { \
544 : dst->field = src->field; \
545 : } \
546 :
547 1 : SET_FIELD(uuid);
548 :
549 : /* Do not remove this statement. When a new field is added, update the expected
550 : * size in this assert and add a corresponding SET_FIELD statement above. */
551 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_part_construct_opts) == 24, "Incorrect size");
552 :
553 : #undef FIELD_OK
554 : #undef SET_FIELD
555 1 : }
556 :
557 : int
558 6 : spdk_bdev_part_construct_ext(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
559 : char *name, uint64_t offset_blocks, uint64_t num_blocks,
560 : char *product_name, const struct spdk_bdev_part_construct_opts *_opts)
561 : {
562 : int rc;
563 6 : bool first_claimed = false;
564 6 : struct spdk_bdev_part_construct_opts opts;
565 6 : struct spdk_uuid ns_uuid;
566 :
567 6 : if (_opts == NULL) {
568 5 : spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
569 : } else {
570 1 : part_construct_opts_copy(_opts, &opts);
571 : }
572 :
573 6 : part->internal.bdev.blocklen = base->bdev->blocklen;
574 6 : part->internal.bdev.blockcnt = num_blocks;
575 6 : part->internal.offset_blocks = offset_blocks;
576 :
577 6 : part->internal.bdev.write_cache = base->bdev->write_cache;
578 6 : part->internal.bdev.required_alignment = base->bdev->required_alignment;
579 6 : part->internal.bdev.ctxt = part;
580 6 : part->internal.bdev.module = base->module;
581 6 : part->internal.bdev.fn_table = base->fn_table;
582 :
583 6 : part->internal.bdev.md_interleave = base->bdev->md_interleave;
584 6 : part->internal.bdev.md_len = base->bdev->md_len;
585 6 : part->internal.bdev.dif_type = base->bdev->dif_type;
586 6 : part->internal.bdev.dif_is_head_of_md = base->bdev->dif_is_head_of_md;
587 6 : part->internal.bdev.dif_check_flags = base->bdev->dif_check_flags;
588 :
589 6 : part->internal.bdev.name = strdup(name);
590 6 : if (part->internal.bdev.name == NULL) {
591 0 : SPDK_ERRLOG("Failed to allocate name for new part of bdev %s\n", spdk_bdev_get_name(base->bdev));
592 0 : return -1;
593 : }
594 :
595 6 : part->internal.bdev.product_name = strdup(product_name);
596 6 : if (part->internal.bdev.product_name == NULL) {
597 0 : free(part->internal.bdev.name);
598 0 : SPDK_ERRLOG("Failed to allocate product name for new part of bdev %s\n",
599 : spdk_bdev_get_name(base->bdev));
600 0 : return -1;
601 : }
602 :
603 : /* The caller may have already specified a UUID. If not, we'll generate one
604 : * based on the namespace UUID, the base bdev's UUID and the block range of the
605 : * partition.
606 : */
607 6 : if (!spdk_uuid_is_null(&opts.uuid)) {
608 1 : spdk_uuid_copy(&part->internal.bdev.uuid, &opts.uuid);
609 : } else {
610 : struct {
611 : struct spdk_uuid uuid;
612 : uint64_t offset_blocks;
613 : uint64_t num_blocks;
614 5 : } base_name;
615 :
616 : /* We need to create a unique base name for this partition. We can't just use
617 : * the base bdev's UUID, since it may be used for multiple partitions. So
618 : * construct a binary name consisting of the uuid + the block range for this
619 : * partition.
620 : */
621 5 : spdk_uuid_copy(&base_name.uuid, &base->bdev->uuid);
622 5 : base_name.offset_blocks = offset_blocks;
623 5 : base_name.num_blocks = num_blocks;
624 :
625 5 : spdk_uuid_parse(&ns_uuid, BDEV_PART_NAMESPACE_UUID);
626 5 : rc = spdk_uuid_generate_sha1(&part->internal.bdev.uuid, &ns_uuid,
627 : (const char *)&base_name, sizeof(base_name));
628 5 : if (rc) {
629 0 : SPDK_ERRLOG("Could not generate new UUID\n");
630 0 : free(part->internal.bdev.name);
631 0 : free(part->internal.bdev.product_name);
632 0 : return -1;
633 : }
634 : }
635 :
636 6 : base->ref++;
637 6 : part->internal.base = base;
638 :
639 6 : if (!base->claimed) {
640 : int rc;
641 :
642 4 : rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
643 4 : if (rc) {
644 0 : SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
645 0 : free(part->internal.bdev.name);
646 0 : free(part->internal.bdev.product_name);
647 0 : base->ref--;
648 0 : return -1;
649 : }
650 4 : base->claimed = true;
651 4 : first_claimed = true;
652 : }
653 :
654 6 : spdk_io_device_register(part, bdev_part_channel_create_cb,
655 : bdev_part_channel_destroy_cb,
656 : base->channel_size,
657 : name);
658 :
659 6 : rc = spdk_bdev_register(&part->internal.bdev);
660 6 : if (rc == 0) {
661 5 : TAILQ_INSERT_TAIL(base->tailq, part, tailq);
662 : } else {
663 1 : spdk_io_device_unregister(part, NULL);
664 1 : if (--base->ref == 0) {
665 0 : spdk_bdev_module_release_bdev(base->bdev);
666 : }
667 1 : free(part->internal.bdev.name);
668 1 : free(part->internal.bdev.product_name);
669 1 : if (first_claimed == true) {
670 0 : base->claimed = false;
671 : }
672 : }
673 :
674 6 : return rc;
675 : }
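
/*
 * Illustrative sketch (hypothetical names): carving one partition out of a base.
 * The spdk_bdev_part structure is allocated by the caller, typically embedded in
 * the module's own per-partition context. A caller-supplied UUID can be forced
 * through the construct options; otherwise the deterministic UUID derived above
 * from the base bdev's UUID and the block range is used.
 */
static int
example_part_create(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
		    char *name, uint64_t offset_blocks, uint64_t num_blocks,
		    const struct spdk_uuid *uuid)
{
	struct spdk_bdev_part_construct_opts opts;

	spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
	if (uuid != NULL) {
		spdk_uuid_copy(&opts.uuid, uuid);
	}

	return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
					    "Example Partition", &opts);
}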
676 :
677 : int
678 5 : spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
679 : char *name, uint64_t offset_blocks, uint64_t num_blocks,
680 : char *product_name)
681 : {
682 5 : return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
683 : product_name, NULL);
684 : }