/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "bdev_malloc.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/accel.h"
#include "spdk/dma.h"
#include "spdk/likely.h"
#include "spdk/string.h"

#include "spdk/log.h"

struct malloc_disk {
	struct spdk_bdev disk;
	void *malloc_buf;
	void *malloc_md_buf;
	TAILQ_ENTRY(malloc_disk) link;
};

struct malloc_task {
	struct iovec iov;
	int num_outstanding;
	enum spdk_bdev_io_status status;
	TAILQ_ENTRY(malloc_task) tailq;
};

struct malloc_channel {
	struct spdk_io_channel *accel_channel;
	struct spdk_poller *completion_poller;
	TAILQ_HEAD(, malloc_task) completed_tasks;
};

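/*
 * DIF/DIX helpers. _malloc_verify_pi() checks protection information on the data
 * (and, for separate metadata, on the md buffer) of a request, while
 * malloc_unmap_write_zeroes_generate_pi() re-generates PI after a region has been
 * zeroed. Both build a temporary spdk_dif_ctx from the bdev's format fields
 * (blocklen, md_len, md_interleave, dif_type, PI format).
 */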
static int
_malloc_verify_pi(struct spdk_bdev_io *bdev_io, struct iovec *iovs, int iovcnt,
		void *md_buf)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	assert(bdev_io->u.bdev.memory_domain == NULL);
	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = bdev->dif_pi_format;
	rc = spdk_dif_ctx_init(&dif_ctx,
			bdev->blocklen,
			bdev->md_len,
			bdev->md_interleave,
			bdev->dif_is_head_of_md,
			bdev->dif_type,
			bdev_io->u.bdev.dif_check_flags,
			bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
			0xFFFF, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize DIF/DIX context\n");
		return rc;
	}

	if (spdk_bdev_is_md_interleaved(bdev)) {
		rc = spdk_dif_verify(iovs,
				iovcnt,
				bdev_io->u.bdev.num_blocks,
				&dif_ctx,
				&err_blk);
	} else {
		struct iovec md_iov = {
			.iov_base = md_buf,
			.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		if (bdev_io->u.bdev.md_buf == NULL) {
			return 0;
		}

		rc = spdk_dix_verify(iovs,
				iovcnt,
				&md_iov,
				bdev_io->u.bdev.num_blocks,
				&dif_ctx,
				&err_blk);
	}

	if (rc != 0) {
		SPDK_ERRLOG("DIF/DIX verify failed: lba %" PRIu64 ", num_blocks %" PRIu64 ", "
				"err_type %u, expected %lu, actual %lu, err_offset %u\n",
				bdev_io->u.bdev.offset_blocks,
				bdev_io->u.bdev.num_blocks,
				err_blk.err_type,
				err_blk.expected,
				err_blk.actual,
				err_blk.err_offset);
	}

	return rc;
}

static int
malloc_verify_pi(struct spdk_bdev_io *bdev_io)
{
	return _malloc_verify_pi(bdev_io,
			bdev_io->u.bdev.iovs,
			bdev_io->u.bdev.iovcnt,
			bdev_io->u.bdev.md_buf);
}

static int
malloc_unmap_write_zeroes_generate_pi(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct malloc_disk *mdisk = bdev_io->bdev->ctxt;
	uint32_t block_size = bdev_io->bdev->blocklen;
	uint32_t dif_check_flags;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	int rc;

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = bdev->dif_pi_format;
	dif_check_flags = bdev->dif_check_flags | SPDK_DIF_CHECK_TYPE_REFTAG |
			SPDK_DIF_FLAGS_APPTAG_CHECK;
	rc = spdk_dif_ctx_init(&dif_ctx,
			bdev->blocklen,
			bdev->md_len,
			bdev->md_interleave,
			bdev->dif_is_head_of_md,
			bdev->dif_type,
			dif_check_flags,
			SPDK_DIF_REFTAG_IGNORE,
			0xFFFF, SPDK_DIF_APPTAG_IGNORE,
			0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
		return rc;
	}

	if (bdev->md_interleave) {
		struct iovec iov = {
			.iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
			.iov_len = bdev_io->u.bdev.num_blocks * block_size,
		};

		rc = spdk_dif_generate(&iov, 1, bdev_io->u.bdev.num_blocks, &dif_ctx);
	} else {
		struct iovec iov = {
			.iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
			.iov_len = bdev_io->u.bdev.num_blocks * block_size,
		};

		struct iovec md_iov = {
			.iov_base = mdisk->malloc_md_buf + bdev_io->u.bdev.offset_blocks * bdev->md_len,
			.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_generate(&iov, 1, &md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
	}

	return rc;
}

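/*
 * Common completion callback for accel operations. A single request may fan out
 * into several accel operations (e.g. a data copy plus a metadata copy), tracked
 * by task->num_outstanding; the bdev_io is completed only when the last one
 * finishes. -ENOMEM is mapped to SPDK_BDEV_IO_STATUS_NOMEM so the bdev layer can
 * retry, any other error marks the request failed, and reads as well as
 * unmap/write-zeroes additionally run the DIF/DIX helpers above.
 */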
static void
malloc_done(void *ref, int status)
{
	struct malloc_task *task = (struct malloc_task *)ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
	int rc;

	if (status != 0) {
		if (status == -ENOMEM) {
			if (task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
				task->status = SPDK_BDEV_IO_STATUS_NOMEM;
			}
		} else {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	if (--task->num_outstanding != 0) {
		return;
	}

	if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
			task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		switch (bdev_io->type) {
		case SPDK_BDEV_IO_TYPE_READ:
			rc = malloc_verify_pi(bdev_io);
			break;
		case SPDK_BDEV_IO_TYPE_UNMAP:
		case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
			rc = malloc_unmap_write_zeroes_generate_pi(bdev_io);
			break;
		default:
			rc = 0;
			break;
		}

		if (rc != 0) {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	assert(!bdev_io->u.bdev.accel_sequence || task->status == SPDK_BDEV_IO_STATUS_NOMEM);
	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
}

static void
malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
		enum spdk_bdev_io_status status)
{
	task->status = status;
	TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
}

static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);

int malloc_disk_count = 0;

static int bdev_malloc_initialize(void);
static void bdev_malloc_deinitialize(void);

static int
bdev_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_task);
}

static struct spdk_bdev_module malloc_if = {
	.name = "malloc",
	.module_init = bdev_malloc_initialize,
	.module_fini = bdev_malloc_deinitialize,
	.get_ctx_size = bdev_malloc_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)

static void
malloc_disk_free(struct malloc_disk *malloc_disk)
{
	if (!malloc_disk) {
		return;
	}

	free(malloc_disk->disk.name);
	spdk_free(malloc_disk->malloc_buf);
	spdk_free(malloc_disk->malloc_md_buf);
	free(malloc_disk);
}

static int
bdev_malloc_destruct(void *ctx)
{
	struct malloc_disk *malloc_disk = ctx;

	TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
	malloc_disk_free(malloc_disk);
	return 0;
}

static int
bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (nbytes < iovs[i].iov_len) {
			return 0;
		}

		nbytes -= iovs[i].iov_len;
	}

	return nbytes != 0;
}

static size_t
malloc_get_md_len(struct spdk_bdev_io *bdev_io)
{
	return bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
}

static uint64_t
malloc_get_md_offset(struct spdk_bdev_io *bdev_io)
{
	return bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;
}

static void *
malloc_get_md_buf(struct spdk_bdev_io *bdev_io)
{
	struct malloc_disk *mdisk = SPDK_CONTAINEROF(bdev_io->bdev, struct malloc_disk, disk);

	assert(spdk_bdev_is_md_separate(bdev_io->bdev));

	return (char *)mdisk->malloc_md_buf + malloc_get_md_offset(bdev_io);
}

static void
malloc_sequence_fail(struct malloc_task *task, int status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	/* For ENOMEM, the IO will be retried by the bdev layer, so we don't abort the sequence */
	if (status != -ENOMEM) {
		spdk_accel_sequence_abort(bdev_io->u.bdev.accel_sequence);
		bdev_io->u.bdev.accel_sequence = NULL;
	}

	malloc_done(task, status);
}

static void
malloc_sequence_done(void *ctx, int status)
{
	struct malloc_task *task = ctx;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	bdev_io->u.bdev.accel_sequence = NULL;
	/* Prevent bdev layer from retrying the request if the sequence failed with ENOMEM */
	malloc_done(task, status != -ENOMEM ? status : -EFAULT);
}

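/*
 * Read path: a copy from the backing malloc buffer into the caller's iovecs is
 * appended to the request's accel sequence. For reads the data must flow out of
 * the backing buffer before any operations the submitter appended, so the
 * sequence is reversed before it is finished. Separate metadata, when present,
 * is copied with a standalone spdk_accel_submit_copy().
 */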
static void
bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset;
	int res = 0;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "read %zu bytes from offset %#" PRIx64 ", iovcnt=%d\n",
			len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch,
			bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
			bdev_io->u.bdev.memory_domain,
			bdev_io->u.bdev.memory_domain_ctx,
			&task->iov, 1, NULL, NULL, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_reverse(bdev_io->u.bdev.accel_sequence);
	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	SPDK_DEBUGLOG(bdev_malloc, "read metadata %zu bytes from offset %#" PRIx64 "\n",
			malloc_get_md_len(bdev_io), malloc_get_md_offset(bdev_io));

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, bdev_io->u.bdev.md_buf, malloc_get_md_buf(bdev_io),
			malloc_get_md_len(bdev_io), malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

static void
bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset;
	int res = 0;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "write %zu bytes to offset %#" PRIx64 ", iovcnt=%d\n",
			len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch, &task->iov, 1, NULL, NULL,
			bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
			bdev_io->u.bdev.memory_domain,
			bdev_io->u.bdev.memory_domain_ctx, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	SPDK_DEBUGLOG(bdev_malloc, "write metadata %zu bytes to offset %#" PRIx64 "\n",
			malloc_get_md_len(bdev_io), malloc_get_md_offset(bdev_io));

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, malloc_get_md_buf(bdev_io), bdev_io->u.bdev.md_buf,
			malloc_get_md_len(bdev_io), malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

static int
bdev_malloc_unmap(struct malloc_disk *mdisk,
		struct spdk_io_channel *ch,
		struct malloc_task *task,
		uint64_t offset,
		uint64_t byte_count)
{
	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
			byte_count, malloc_done, task);
}

static void
bdev_malloc_copy(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		struct malloc_task *task,
		uint64_t dst_offset, uint64_t src_offset, size_t len)
{
	int64_t res = 0;
	void *dst = mdisk->malloc_buf + dst_offset;
	void *src = mdisk->malloc_buf + src_offset;

	SPDK_DEBUGLOG(bdev_malloc, "Copy %zu bytes from offset %#" PRIx64 " to offset %#" PRIx64 "\n",
			len, src_offset, dst_offset);

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	res = spdk_accel_submit_copy(ch, dst, src, len, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

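/*
 * Main dispatch for submitted bdev_ios. Reads without a data buffer are served
 * zero-copy by pointing the iovec (and, for separate metadata, the md pointer)
 * directly at the backing memory; writes are PI-verified first when DIF is
 * enabled; flush and reset complete immediately since the data already lives in
 * memory; unmap and write-zeroes both zero-fill the affected range.
 */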
static int
_bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_task *task = (struct malloc_task *)bdev_io->driver_ctx;
	struct malloc_disk *disk = bdev_io->bdev->ctxt;
	uint32_t block_size = bdev_io->bdev->blocklen;
	int rc;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
			assert(bdev_io->u.bdev.iovcnt == 1);
			assert(bdev_io->u.bdev.memory_domain == NULL);
			bdev_io->u.bdev.iovs[0].iov_base =
				disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
			if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
				spdk_bdev_io_set_md_buf(bdev_io, malloc_get_md_buf(bdev_io),
						malloc_get_md_len(bdev_io));
			}
			malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
			return 0;
		}

		bdev_malloc_readv(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE) {
			rc = malloc_verify_pi(bdev_io);
			if (rc != 0) {
				malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
				return 0;
			}
		}

		bdev_malloc_writev(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_RESET:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_FLUSH:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return bdev_malloc_unmap(disk, mch->accel_channel, task,
				bdev_io->u.bdev.offset_blocks * block_size,
				bdev_io->u.bdev.num_blocks * block_size);

	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* Write zeroes reuses bdev_malloc_unmap(), whose zero-pattern fill
		 * (spdk_accel_submit_fill with value 0) zeroes out all of the requested bytes. */
		return bdev_malloc_unmap(disk, mch->accel_channel, task,
				bdev_io->u.bdev.offset_blocks * block_size,
				bdev_io->u.bdev.num_blocks * block_size);

	case SPDK_BDEV_IO_TYPE_ZCOPY:
		if (bdev_io->u.bdev.zcopy.start) {
			void *buf;
			size_t len;

			buf = disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			len = bdev_io->u.bdev.num_blocks * block_size;
			spdk_bdev_io_set_buf(bdev_io, buf, len);
			if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
				spdk_bdev_io_set_md_buf(bdev_io, malloc_get_md_buf(bdev_io),
						malloc_get_md_len(bdev_io));
			}
		}
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;
	case SPDK_BDEV_IO_TYPE_ABORT:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
		return 0;
	case SPDK_BDEV_IO_TYPE_COPY:
		bdev_malloc_copy(disk, mch->accel_channel, task,
				bdev_io->u.bdev.offset_blocks * block_size,
				bdev_io->u.bdev.copy.src_offset_blocks * block_size,
				bdev_io->u.bdev.num_blocks * block_size);
		return 0;

	default:
		return -1;
	}
	return 0;
}

static void
bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);

	if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
				SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	case SPDK_BDEV_IO_TYPE_ZCOPY:
	case SPDK_BDEV_IO_TYPE_ABORT:
	case SPDK_BDEV_IO_TYPE_COPY:
		return true;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_malloc_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_malloc_disks);
}

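/*
 * Emits the JSON-RPC call that would recreate this bdev when the configuration
 * is saved and replayed. For a plain 512-byte-block disk without metadata the
 * generated object looks roughly like the following (illustrative values only):
 *
 *	{
 *	  "method": "bdev_malloc_create",
 *	  "params": {
 *	    "name": "Malloc0",
 *	    "num_blocks": 131072,
 *	    "block_size": 512,
 *	    "physical_block_size": 512,
 *	    "uuid": "...",
 *	    "optimal_io_boundary": 0,
 *	    "md_size": 0,
 *	    "dif_type": 0,
 *	    "dif_is_head_of_md": false,
 *	    "dif_pi_format": 0
 *	  }
 *	}
 */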
600 0 : bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
601 : {
602 0 : spdk_json_write_object_begin(w);
603 :
604 0 : spdk_json_write_named_string(w, "method", "bdev_malloc_create");
605 :
606 0 : spdk_json_write_named_object_begin(w, "params");
607 0 : spdk_json_write_named_string(w, "name", bdev->name);
608 0 : spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
609 0 : spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
610 0 : spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
611 0 : spdk_json_write_named_uuid(w, "uuid", &bdev->uuid);
612 0 : spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);
613 0 : spdk_json_write_named_uint32(w, "md_size", bdev->md_len);
614 0 : spdk_json_write_named_uint32(w, "dif_type", bdev->dif_type);
615 0 : spdk_json_write_named_bool(w, "dif_is_head_of_md", bdev->dif_is_head_of_md);
616 0 : spdk_json_write_named_uint32(w, "dif_pi_format", bdev->dif_pi_format);
617 :
618 0 : spdk_json_write_object_end(w);
619 :
620 0 : spdk_json_write_object_end(w);
621 0 : }
622 :
623 : static int
624 0 : bdev_malloc_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
625 : {
626 0 : struct malloc_disk *malloc_disk = ctx;
627 : struct spdk_memory_domain *domain;
628 0 : int num_domains = 0;
629 :
630 0 : if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
631 0 : return 0;
632 : }
633 :
634 : /* Report support for every memory domain */
635 0 : for (domain = spdk_memory_domain_get_first(NULL); domain != NULL;
636 0 : domain = spdk_memory_domain_get_next(domain, NULL)) {
637 0 : if (domains != NULL && num_domains < array_size) {
638 0 : domains[num_domains] = domain;
639 0 : }
640 0 : num_domains++;
641 0 : }
642 :
643 0 : return num_domains;
644 0 : }
645 :
646 : static bool
647 0 : bdev_malloc_accel_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
648 : {
649 0 : struct malloc_disk *malloc_disk = ctx;
650 :
651 0 : if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
652 0 : return false;
653 : }
654 :
655 0 : switch (type) {
656 : case SPDK_BDEV_IO_TYPE_READ:
657 : case SPDK_BDEV_IO_TYPE_WRITE:
658 0 : return true;
659 : default:
660 0 : return false;
661 : }
662 0 : }
663 :
664 : static const struct spdk_bdev_fn_table malloc_fn_table = {
665 : .destruct = bdev_malloc_destruct,
666 : .submit_request = bdev_malloc_submit_request,
667 : .io_type_supported = bdev_malloc_io_type_supported,
668 : .get_io_channel = bdev_malloc_get_io_channel,
669 : .write_config_json = bdev_malloc_write_json_config,
670 : .get_memory_domains = bdev_malloc_get_memory_domains,
671 : .accel_sequence_supported = bdev_malloc_accel_sequence_supported,
672 : };
673 :
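/*
 * Called once at creation time for DIF-enabled disks: formats the freshly zeroed
 * backing buffer (and separate metadata buffer, if any) with valid protection
 * information so that reads issued before the first write still pass PI
 * verification. Reference and application tags are written as "ignore" values
 * because the bdev API does not carry them from the user.
 */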
static int
malloc_disk_setup_pi(struct malloc_disk *mdisk)
{
	struct spdk_bdev *bdev = &mdisk->disk;
	struct spdk_dif_ctx dif_ctx;
	struct iovec iov, md_iov;
	uint32_t dif_check_flags;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = bdev->dif_pi_format;
	/* Set APPTAG|REFTAG_IGNORE to PI fields after creation of malloc bdev */
	dif_check_flags = bdev->dif_check_flags | SPDK_DIF_CHECK_TYPE_REFTAG |
			SPDK_DIF_FLAGS_APPTAG_CHECK;
	rc = spdk_dif_ctx_init(&dif_ctx,
			bdev->blocklen,
			bdev->md_len,
			bdev->md_interleave,
			bdev->dif_is_head_of_md,
			bdev->dif_type,
			dif_check_flags,
			SPDK_DIF_REFTAG_IGNORE,
			0xFFFF, SPDK_DIF_APPTAG_IGNORE,
			0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
		return rc;
	}

	iov.iov_base = mdisk->malloc_buf;
	iov.iov_len = bdev->blockcnt * bdev->blocklen;

	if (mdisk->disk.md_interleave) {
		rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
	} else {
		md_iov.iov_base = mdisk->malloc_md_buf;
		md_iov.iov_len = bdev->blockcnt * bdev->md_len;

		rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
	}

	return rc;
}

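/*
 * Creates and registers a malloc bdev, typically on behalf of the
 * bdev_malloc_create RPC. A minimal caller sketch (field names taken from the
 * uses below; values illustrative, error handling omitted) might look like:
 *
 *	struct malloc_bdev_opts opts = {};
 *	struct spdk_bdev *bdev;
 *
 *	opts.name = "Malloc0";
 *	opts.num_blocks = 131072;
 *	opts.block_size = 512;
 *	rc = create_malloc_disk(&bdev, &opts);
 *
 * Block and physical block sizes must be multiples of 512 bytes, and only
 * metadata sizes of 0/8/16/32/64/128 bytes are accepted.
 */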
int
create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
{
	struct malloc_disk *mdisk;
	uint32_t block_size;
	int rc;

	assert(opts != NULL);

	if (opts->num_blocks == 0) {
		SPDK_ERRLOG("Disk num_blocks must be greater than 0\n");
		return -EINVAL;
	}

	if (opts->block_size % 512) {
		SPDK_ERRLOG("Data block size must be 512 bytes aligned\n");
		return -EINVAL;
	}

	if (opts->physical_block_size % 512) {
		SPDK_ERRLOG("Physical block must be 512 bytes aligned\n");
		return -EINVAL;
	}

	switch (opts->md_size) {
	case 0:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		SPDK_ERRLOG("metadata size %u is not supported\n", opts->md_size);
		return -EINVAL;
	}

	if (opts->md_interleave) {
		block_size = opts->block_size + opts->md_size;
	} else {
		block_size = opts->block_size;
	}

	mdisk = calloc(1, sizeof(*mdisk));
	if (!mdisk) {
		SPDK_ERRLOG("mdisk calloc() failed\n");
		return -ENOMEM;
	}

	/*
	 * Allocate the large backend memory buffer from pinned memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 * from on multi-socket systems.
	 */
	mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * block_size, 2 * 1024 * 1024, NULL,
			SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}

	if (!opts->md_interleave && opts->md_size != 0) {
		mdisk->malloc_md_buf = spdk_zmalloc(opts->num_blocks * opts->md_size, 2 * 1024 * 1024, NULL,
				SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!mdisk->malloc_md_buf) {
			SPDK_ERRLOG("malloc_md_buf spdk_zmalloc() failed\n");
			malloc_disk_free(mdisk);
			return -ENOMEM;
		}
	}

	if (opts->name) {
		mdisk->disk.name = strdup(opts->name);
	} else {
		/* Auto-generate a name */
		mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
		malloc_disk_count++;
	}
	if (!mdisk->disk.name) {
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}
	mdisk->disk.product_name = "Malloc disk";

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.phys_blocklen = opts->physical_block_size;
	mdisk->disk.blockcnt = opts->num_blocks;
	mdisk->disk.md_len = opts->md_size;
	mdisk->disk.md_interleave = opts->md_interleave;
	mdisk->disk.dif_type = opts->dif_type;
	mdisk->disk.dif_is_head_of_md = opts->dif_is_head_of_md;
	/* Current block device layer API does not propagate
	 * any DIF related information from user. So, we can
	 * not generate or verify Application Tag.
	 */
	switch (opts->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
				SPDK_DIF_FLAGS_REFTAG_CHECK;
		break;
	case SPDK_DIF_TYPE3:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK;
		break;
	case SPDK_DIF_DISABLE:
		break;
	}
	mdisk->disk.dif_pi_format = opts->dif_pi_format;

	if (opts->dif_type != SPDK_DIF_DISABLE) {
		rc = malloc_disk_setup_pi(mdisk);
		if (rc) {
			SPDK_ERRLOG("Failed to set up protection information.\n");
			malloc_disk_free(mdisk);
			return rc;
		}
	}

	if (opts->optimal_io_boundary) {
		mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
		mdisk->disk.split_on_optimal_io_boundary = true;
	}
	if (!spdk_uuid_is_null(&opts->uuid)) {
		spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
	}

	mdisk->disk.max_copy = 0;
	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;
	mdisk->disk.module = &malloc_if;

	rc = spdk_bdev_register(&mdisk->disk);
	if (rc) {
		malloc_disk_free(mdisk);
		return rc;
	}

	*bdev = &(mdisk->disk);

	TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);

	return rc;
}

void
delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
{
	int rc;

	rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
	if (rc != 0) {
		cb_fn(cb_arg, rc);
	}
}

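/*
 * Per-channel completion handling. Requests that finish inline in the submit
 * path (reset, flush, zcopy, error cases) are queued on mch->completed_tasks via
 * malloc_complete_task() and completed from this poller rather than directly in
 * the submit call, which keeps completions asynchronous from the caller's
 * perspective.
 */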
static int
malloc_completion_poller(void *ctx)
{
	struct malloc_channel *ch = ctx;
	struct malloc_task *task;
	TAILQ_HEAD(, malloc_task) completed_tasks;
	uint32_t num_completions = 0;

	TAILQ_INIT(&completed_tasks);
	TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);

	while (!TAILQ_EMPTY(&completed_tasks)) {
		task = TAILQ_FIRST(&completed_tasks);
		TAILQ_REMOVE(&completed_tasks, task, tailq);
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
		num_completions++;
	}

	return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
malloc_create_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	ch->accel_channel = spdk_accel_get_io_channel();
	if (!ch->accel_channel) {
		SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
		return -ENOMEM;
	}

	ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
	if (!ch->completion_poller) {
		SPDK_ERRLOG("Failed to register malloc completion poller\n");
		spdk_put_io_channel(ch->accel_channel);
		return -ENOMEM;
	}

	TAILQ_INIT(&ch->completed_tasks);

	return 0;
}

static void
malloc_destroy_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	assert(TAILQ_EMPTY(&ch->completed_tasks));

	spdk_put_io_channel(ch->accel_channel);
	spdk_poller_unregister(&ch->completion_poller);
}

static int
bdev_malloc_initialize(void)
{
	/* This needs to be reset for each reinitialization of submodules.
	 * Otherwise after enough devices or reinitializations the value gets too high.
	 * TODO: Make malloc bdev name mandatory and remove this counter. */
	malloc_disk_count = 0;

	spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
			malloc_destroy_channel_cb, sizeof(struct malloc_channel),
			"bdev_malloc");

	return 0;
}

static void
bdev_malloc_deinitialize(void)
{
	spdk_io_device_unregister(&g_malloc_disks, NULL);
}

SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)