Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2017 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "bdev_malloc.h"
10 : #include "spdk/endian.h"
11 : #include "spdk/env.h"
12 : #include "spdk/accel.h"
13 : #include "spdk/dma.h"
14 : #include "spdk/likely.h"
15 : #include "spdk/string.h"
16 :
17 : #include "spdk/log.h"
18 :
19 : struct malloc_disk {
20 : struct spdk_bdev disk;
21 : void *malloc_buf;
22 : void *malloc_md_buf;
23 : TAILQ_ENTRY(malloc_disk) link;
24 : };
25 :
26 : struct malloc_task {
27 : struct iovec iov;
28 : int num_outstanding;
29 : enum spdk_bdev_io_status status;
30 : TAILQ_ENTRY(malloc_task) tailq;
31 : };
32 :
33 : struct malloc_channel {
34 : struct spdk_io_channel *accel_channel;
35 : struct spdk_poller *completion_poller;
36 : TAILQ_HEAD(, malloc_task) completed_tasks;
37 : };
38 :
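     : /* Verify the DIF/DIX protection information in the given data buffers (plus an
     :  * optional separate metadata buffer) against the bdev's PI configuration. For
     :  * separate metadata, verification is skipped when the IO carries no metadata buffer. */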
39 : static int
40 0 : _malloc_verify_pi(struct spdk_bdev_io *bdev_io, struct iovec *iovs, int iovcnt,
41 : void *md_buf)
42 : {
43 0 : struct spdk_bdev *bdev = bdev_io->bdev;
44 0 : struct spdk_dif_ctx dif_ctx;
45 0 : struct spdk_dif_error err_blk;
46 : int rc;
47 0 : struct spdk_dif_ctx_init_ext_opts dif_opts;
48 :
49 0 : assert(bdev_io->u.bdev.memory_domain == NULL);
50 0 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
51 0 : dif_opts.dif_pi_format = bdev->dif_pi_format;
52 0 : rc = spdk_dif_ctx_init(&dif_ctx,
53 : bdev->blocklen,
54 : bdev->md_len,
55 0 : bdev->md_interleave,
56 0 : bdev->dif_is_head_of_md,
57 : bdev->dif_type,
58 : bdev_io->u.bdev.dif_check_flags,
59 0 : bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
60 : 0xFFFF, 0, 0, 0, &dif_opts);
61 0 : if (rc != 0) {
62 0 : SPDK_ERRLOG("Failed to initialize DIF/DIX context\n");
63 0 : return rc;
64 : }
65 :
66 0 : if (spdk_bdev_is_md_interleaved(bdev)) {
67 0 : rc = spdk_dif_verify(iovs,
68 : iovcnt,
69 0 : bdev_io->u.bdev.num_blocks,
70 : &dif_ctx,
71 : &err_blk);
72 : } else {
73 0 : struct iovec md_iov = {
74 : .iov_base = md_buf,
75 0 : .iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
76 : };
77 :
78 0 : if (bdev_io->u.bdev.md_buf == NULL) {
79 0 : return 0;
80 : }
81 :
82 0 : rc = spdk_dix_verify(iovs,
83 : iovcnt,
84 : &md_iov,
85 0 : bdev_io->u.bdev.num_blocks,
86 : &dif_ctx,
87 : &err_blk);
88 : }
89 :
90 0 : if (rc != 0) {
91 0 : SPDK_ERRLOG("DIF/DIX verify failed: lba %" PRIu64 ", num_blocks %" PRIu64 ", "
92 : "err_type %u, expected %lu, actual %lu, err_offset %u\n",
93 : bdev_io->u.bdev.offset_blocks,
94 : bdev_io->u.bdev.num_blocks,
95 : err_blk.err_type,
96 : err_blk.expected,
97 : err_blk.actual,
98 : err_blk.err_offset);
99 : }
100 :
101 0 : return rc;
102 : }
103 :
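     : /* Verify PI against the data/metadata buffers attached to the bdev_io itself. */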
104 : static int
105 0 : malloc_verify_pi_io_buf(struct spdk_bdev_io *bdev_io)
106 : {
107 0 : return _malloc_verify_pi(bdev_io,
108 : bdev_io->u.bdev.iovs,
109 : bdev_io->u.bdev.iovcnt,
110 : bdev_io->u.bdev.md_buf);
111 : }
112 :
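     : /* Verify PI directly against the backing malloc buffer for the addressed blocks. */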
113 : static int
114 0 : malloc_verify_pi_malloc_buf(struct spdk_bdev_io *bdev_io)
115 : {
116 0 : struct iovec iov;
117 0 : struct spdk_bdev *bdev = bdev_io->bdev;
118 0 : struct malloc_disk *mdisk = bdev->ctxt;
119 : uint64_t len, offset;
120 :
121 0 : len = bdev_io->u.bdev.num_blocks * bdev->blocklen;
122 0 : offset = bdev_io->u.bdev.offset_blocks * bdev->blocklen;
123 :
124 0 : iov.iov_base = mdisk->malloc_buf + offset;
125 0 : iov.iov_len = len;
126 :
127 0 : return _malloc_verify_pi(bdev_io, &iov, 1, NULL);
128 : }
129 :
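     : /* Regenerate PI for a range that has just been unmapped or write-zeroed so that
     :  * later reads of those blocks still pass verification. The app tag and ref tag
     :  * are written as IGNORE values. */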
130 : static int
131 0 : malloc_unmap_write_zeroes_generate_pi(struct spdk_bdev_io *bdev_io)
132 : {
133 0 : struct spdk_bdev *bdev = bdev_io->bdev;
134 0 : struct malloc_disk *mdisk = bdev_io->bdev->ctxt;
135 0 : uint32_t block_size = bdev_io->bdev->blocklen;
136 : uint32_t dif_check_flags;
137 0 : struct spdk_dif_ctx dif_ctx;
138 0 : struct spdk_dif_ctx_init_ext_opts dif_opts;
139 : int rc;
140 :
141 0 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
142 0 : dif_opts.dif_pi_format = bdev->dif_pi_format;
143 0 : dif_check_flags = bdev->dif_check_flags | SPDK_DIF_FLAGS_REFTAG_CHECK |
144 : SPDK_DIF_FLAGS_APPTAG_CHECK;
145 0 : rc = spdk_dif_ctx_init(&dif_ctx,
146 : bdev->blocklen,
147 : bdev->md_len,
148 0 : bdev->md_interleave,
149 0 : bdev->dif_is_head_of_md,
150 : bdev->dif_type,
151 : dif_check_flags,
152 : SPDK_DIF_REFTAG_IGNORE,
153 : 0xFFFF, SPDK_DIF_APPTAG_IGNORE,
154 : 0, 0, &dif_opts);
155 0 : if (rc != 0) {
156 0 : SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
157 0 : return rc;
158 : }
159 :
160 0 : if (bdev->md_interleave) {
161 0 : struct iovec iov = {
162 0 : .iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
163 0 : .iov_len = bdev_io->u.bdev.num_blocks * block_size,
164 : };
165 :
166 0 : rc = spdk_dif_generate(&iov, 1, bdev_io->u.bdev.num_blocks, &dif_ctx);
167 : } else {
168 0 : struct iovec iov = {
169 0 : .iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
170 0 : .iov_len = bdev_io->u.bdev.num_blocks * block_size,
171 : };
172 :
173 0 : struct iovec md_iov = {
174 0 : .iov_base = mdisk->malloc_md_buf + bdev_io->u.bdev.offset_blocks * bdev->md_len,
175 0 : .iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
176 : };
177 :
178 0 : rc = spdk_dix_generate(&iov, 1, &md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx);
179 : }
180 :
181 0 : if (rc != 0) {
182 0 : SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
183 : }
184 :
185 :
186 0 : return rc;
187 : }
188 :
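     : /* Completion callback for the accel operations backing a bdev IO. One IO may fan
     :  * out into several operations (data plus metadata); the bdev IO is completed once
     :  * the last one finishes, after any required PI verification or generation. */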
189 : static void
190 0 : malloc_done(void *ref, int status)
191 : {
192 0 : struct malloc_task *task = (struct malloc_task *)ref;
193 0 : struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
194 : int rc;
195 :
196 0 : if (status != 0) {
197 0 : if (status == -ENOMEM) {
198 0 : if (task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
199 0 : task->status = SPDK_BDEV_IO_STATUS_NOMEM;
200 : }
201 : } else {
202 0 : task->status = SPDK_BDEV_IO_STATUS_FAILED;
203 : }
204 : }
205 :
206 0 : if (--task->num_outstanding != 0) {
207 0 : return;
208 : }
209 :
210 0 : if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
211 0 : task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
212 0 : switch (bdev_io->type) {
213 0 : case SPDK_BDEV_IO_TYPE_READ:
214 0 : if (!spdk_bdev_io_hide_metadata(bdev_io)) {
215 0 : rc = malloc_verify_pi_io_buf(bdev_io);
216 : } else {
217 0 : rc = 0;
218 : }
219 0 : break;
220 0 : case SPDK_BDEV_IO_TYPE_WRITE:
221 0 : if (!spdk_bdev_io_hide_metadata(bdev_io)) {
222 0 : rc = 0;
223 : } else {
224 0 : rc = malloc_verify_pi_malloc_buf(bdev_io);
225 : }
226 0 : break;
227 0 : case SPDK_BDEV_IO_TYPE_UNMAP:
228 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
229 0 : rc = malloc_unmap_write_zeroes_generate_pi(bdev_io);
230 0 : break;
231 0 : default:
232 0 : rc = 0;
233 0 : break;
234 : }
235 :
236 0 : if (rc != 0) {
237 0 : task->status = SPDK_BDEV_IO_STATUS_FAILED;
238 : }
239 : }
240 :
241 0 : assert(!bdev_io->u.bdev.accel_sequence || task->status == SPDK_BDEV_IO_STATUS_NOMEM);
242 0 : spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
243 : }
244 :
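     : /* Defer completion to the channel's poller instead of completing inline in the
     :  * submit path. */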
245 : static void
246 0 : malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
247 : enum spdk_bdev_io_status status)
248 : {
249 0 : task->status = status;
250 0 : TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
251 0 : }
252 :
253 : static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);
254 :
255 : int malloc_disk_count = 0;
256 :
257 : static int bdev_malloc_initialize(void);
258 : static void bdev_malloc_deinitialize(void);
259 :
260 : static int
261 0 : bdev_malloc_get_ctx_size(void)
262 : {
263 0 : return sizeof(struct malloc_task);
264 : }
265 :
266 : static struct spdk_bdev_module malloc_if = {
267 : .name = "malloc",
268 : .module_init = bdev_malloc_initialize,
269 : .module_fini = bdev_malloc_deinitialize,
270 : .get_ctx_size = bdev_malloc_get_ctx_size,
271 :
272 : };
273 :
274 0 : SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)
275 :
276 : static void
277 0 : malloc_disk_free(struct malloc_disk *malloc_disk)
278 : {
279 0 : if (!malloc_disk) {
280 0 : return;
281 : }
282 :
283 0 : free(malloc_disk->disk.name);
284 0 : spdk_free(malloc_disk->malloc_buf);
285 0 : spdk_free(malloc_disk->malloc_md_buf);
286 0 : free(malloc_disk);
287 : }
288 :
289 : static int
290 0 : bdev_malloc_destruct(void *ctx)
291 : {
292 0 : struct malloc_disk *malloc_disk = ctx;
293 :
294 0 : TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
295 0 : malloc_disk_free(malloc_disk);
296 0 : return 0;
297 : }
298 :
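     : /* Return nonzero if the iovecs are collectively too short to hold nbytes. */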
299 : static int
300 0 : bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
301 : {
302 : int i;
303 :
304 0 : for (i = 0; i < iovcnt; i++) {
305 0 : if (nbytes < iovs[i].iov_len) {
306 0 : return 0;
307 : }
308 :
309 0 : nbytes -= iovs[i].iov_len;
310 : }
311 :
312 0 : return nbytes != 0;
313 : }
314 :
315 : static size_t
316 0 : malloc_get_md_len(struct spdk_bdev_io *bdev_io)
317 : {
318 0 : return bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
319 : }
320 :
321 : static uint64_t
322 0 : malloc_get_md_offset(struct spdk_bdev_io *bdev_io)
323 : {
324 0 : return bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;
325 : }
326 :
327 : static void *
328 0 : malloc_get_md_buf(struct spdk_bdev_io *bdev_io)
329 : {
330 0 : struct malloc_disk *mdisk = SPDK_CONTAINEROF(bdev_io->bdev, struct malloc_disk, disk);
331 :
332 0 : assert(spdk_bdev_is_md_separate(bdev_io->bdev));
333 :
334 0 : return (char *)mdisk->malloc_md_buf + malloc_get_md_offset(bdev_io);
335 : }
336 :
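     : /* Fail a task whose accel sequence could not be built. Except for ENOMEM, which
     :  * the bdev layer retries, the partially built sequence is aborted first. */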
337 : static void
338 0 : malloc_sequence_fail(struct malloc_task *task, int status)
339 : {
340 0 : struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
341 :
342 : /* For ENOMEM, the IO will be retried by the bdev layer, so we don't abort the sequence */
343 0 : if (status != -ENOMEM) {
344 0 : spdk_accel_sequence_abort(bdev_io->u.bdev.accel_sequence);
345 0 : bdev_io->u.bdev.accel_sequence = NULL;
346 : }
347 :
348 0 : malloc_done(task, status);
349 0 : }
350 :
351 : static void
352 0 : malloc_sequence_done(void *ctx, int status)
353 : {
354 0 : struct malloc_task *task = ctx;
355 0 : struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
356 :
357 0 : bdev_io->u.bdev.accel_sequence = NULL;
358 : /* Prevent bdev layer from retrying the request if the sequence failed with ENOMEM */
359 0 : malloc_done(task, status != -ENOMEM ? status : -EFAULT);
360 0 : }
361 :
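     : /* Read path: append a copy from the backing buffer into the IO's iovecs to the
     :  * IO's accel sequence (the sequence is reversed so the appended copy executes
     :  * first), then copy any separate metadata with an additional accel operation. */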
362 : static void
363 0 : bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
364 : struct malloc_task *task, struct spdk_bdev_io *bdev_io)
365 : {
366 : uint64_t len, offset;
367 0 : int res = 0;
368 :
369 0 : len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
370 0 : offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;
371 :
372 0 : if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
373 0 : spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
374 : SPDK_BDEV_IO_STATUS_FAILED);
375 0 : return;
376 : }
377 :
378 0 : task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
379 0 : task->num_outstanding = 0;
380 0 : task->iov.iov_base = mdisk->malloc_buf + offset;
381 0 : task->iov.iov_len = len;
382 :
383 0 : SPDK_DEBUGLOG(bdev_malloc, "read %zu bytes from offset %#" PRIx64 ", iovcnt=%d\n",
384 : len, offset, bdev_io->u.bdev.iovcnt);
385 :
386 0 : task->num_outstanding++;
387 0 : res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch,
388 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
389 : bdev_io->u.bdev.memory_domain,
390 : bdev_io->u.bdev.memory_domain_ctx,
391 : &task->iov, 1, NULL, NULL, NULL, NULL);
392 0 : if (spdk_unlikely(res != 0)) {
393 0 : malloc_sequence_fail(task, res);
394 0 : return;
395 : }
396 :
397 0 : spdk_accel_sequence_reverse(bdev_io->u.bdev.accel_sequence);
398 0 : spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);
399 :
400 0 : if (bdev_io->u.bdev.md_buf == NULL) {
401 0 : return;
402 : }
403 :
404 0 : SPDK_DEBUGLOG(bdev_malloc, "read metadata %zu bytes from offset %#" PRIx64 "\n",
405 : malloc_get_md_len(bdev_io), malloc_get_md_offset(bdev_io));
406 :
407 0 : task->num_outstanding++;
408 0 : res = spdk_accel_submit_copy(ch, bdev_io->u.bdev.md_buf, malloc_get_md_buf(bdev_io),
409 : malloc_get_md_len(bdev_io), malloc_done, task);
410 0 : if (res != 0) {
411 0 : malloc_done(task, res);
412 : }
413 : }
414 :
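     : /* Write path: append a copy from the IO's iovecs into the backing buffer to the
     :  * IO's accel sequence, then copy any separate metadata with an additional accel
     :  * operation. */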
415 : static void
416 0 : bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
417 : struct malloc_task *task, struct spdk_bdev_io *bdev_io)
418 : {
419 : uint64_t len, offset;
420 0 : int res = 0;
421 :
422 0 : len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
423 0 : offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;
424 :
425 0 : if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
426 0 : spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
427 : SPDK_BDEV_IO_STATUS_FAILED);
428 0 : return;
429 : }
430 :
431 0 : task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
432 0 : task->num_outstanding = 0;
433 0 : task->iov.iov_base = mdisk->malloc_buf + offset;
434 0 : task->iov.iov_len = len;
435 :
436 0 : SPDK_DEBUGLOG(bdev_malloc, "write %zu bytes to offset %#" PRIx64 ", iovcnt=%d\n",
437 : len, offset, bdev_io->u.bdev.iovcnt);
438 :
439 0 : task->num_outstanding++;
440 0 : res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch, &task->iov, 1, NULL, NULL,
441 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
442 : bdev_io->u.bdev.memory_domain,
443 : bdev_io->u.bdev.memory_domain_ctx, NULL, NULL);
444 0 : if (spdk_unlikely(res != 0)) {
445 0 : malloc_sequence_fail(task, res);
446 0 : return;
447 : }
448 :
449 0 : spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);
450 :
451 0 : if (bdev_io->u.bdev.md_buf == NULL) {
452 0 : return;
453 : }
454 :
455 0 : SPDK_DEBUGLOG(bdev_malloc, "write metadata %zu bytes to offset %#" PRIx64 "\n",
456 : malloc_get_md_len(bdev_io), malloc_get_md_offset(bdev_io));
457 :
458 0 : task->num_outstanding++;
459 0 : res = spdk_accel_submit_copy(ch, malloc_get_md_buf(bdev_io), bdev_io->u.bdev.md_buf,
460 : malloc_get_md_len(bdev_io), malloc_done, task);
461 0 : if (res != 0) {
462 0 : malloc_done(task, res);
463 : }
464 : }
465 :
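     : /* Zero-fill a byte range of the backing buffer through the accel framework. */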
466 : static int
467 0 : bdev_malloc_unmap(struct malloc_disk *mdisk,
468 : struct spdk_io_channel *ch,
469 : struct malloc_task *task,
470 : uint64_t offset,
471 : uint64_t byte_count)
472 : {
473 0 : task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
474 0 : task->num_outstanding = 1;
475 :
476 0 : return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
477 : byte_count, malloc_done, task);
478 : }
479 :
480 : static void
481 0 : bdev_malloc_copy(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
482 : struct malloc_task *task,
483 : uint64_t dst_offset, uint64_t src_offset, size_t len)
484 : {
485 0 : int64_t res = 0;
486 0 : void *dst = mdisk->malloc_buf + dst_offset;
487 0 : void *src = mdisk->malloc_buf + src_offset;
488 :
489 0 : SPDK_DEBUGLOG(bdev_malloc, "Copy %zu bytes from offset %#" PRIx64 " to offset %#" PRIx64 "\n",
490 : len, src_offset, dst_offset);
491 :
492 0 : task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
493 0 : task->num_outstanding = 1;
494 :
495 0 : res = spdk_accel_submit_copy(ch, dst, src, len, malloc_done, task);
496 0 : if (res != 0) {
497 0 : malloc_done(task, res);
498 : }
499 0 : }
500 :
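     : /* Dispatch one bdev IO. Returns 0 when the IO was handled or queued, and nonzero
     :  * for unsupported IO types, in which case the caller fails the IO. */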
501 : static int
502 0 : _bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
503 : {
504 0 : struct malloc_task *task = (struct malloc_task *)bdev_io->driver_ctx;
505 0 : struct malloc_disk *disk = bdev_io->bdev->ctxt;
506 0 : uint32_t block_size = bdev_io->bdev->blocklen;
507 : int rc;
508 :
509 0 : switch (bdev_io->type) {
510 0 : case SPDK_BDEV_IO_TYPE_READ:
511 0 : if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
512 0 : assert(bdev_io->u.bdev.iovcnt == 1);
513 0 : assert(bdev_io->u.bdev.memory_domain == NULL);
514 0 : bdev_io->u.bdev.iovs[0].iov_base =
515 0 : disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
516 0 : bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
517 0 : if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
518 0 : spdk_bdev_io_set_md_buf(bdev_io, malloc_get_md_buf(bdev_io),
519 : malloc_get_md_len(bdev_io));
520 : }
521 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
522 0 : return 0;
523 : }
524 :
525 0 : if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
526 0 : spdk_bdev_io_hide_metadata(bdev_io)) {
527 0 : rc = malloc_verify_pi_malloc_buf(bdev_io);
528 0 : if (rc != 0) {
529 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
530 0 : return 0;
531 : }
532 : }
533 :
534 0 : bdev_malloc_readv(disk, mch->accel_channel, task, bdev_io);
535 0 : return 0;
536 :
537 0 : case SPDK_BDEV_IO_TYPE_WRITE:
538 0 : if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
539 0 : !spdk_bdev_io_hide_metadata(bdev_io)) {
540 0 : rc = malloc_verify_pi_io_buf(bdev_io);
541 0 : if (rc != 0) {
542 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
543 0 : return 0;
544 : }
545 : }
546 :
547 0 : bdev_malloc_writev(disk, mch->accel_channel, task, bdev_io);
548 0 : return 0;
549 :
550 0 : case SPDK_BDEV_IO_TYPE_RESET:
551 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
552 0 : return 0;
553 :
554 0 : case SPDK_BDEV_IO_TYPE_FLUSH:
555 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
556 0 : return 0;
557 :
558 0 : case SPDK_BDEV_IO_TYPE_UNMAP:
559 0 : return bdev_malloc_unmap(disk, mch->accel_channel, task,
560 0 : bdev_io->u.bdev.offset_blocks * block_size,
561 0 : bdev_io->u.bdev.num_blocks * block_size);
562 :
563 0 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
564 : /* bdev_malloc_unmap() zero-fills the requested range (spdk_accel_submit_fill() with a fill value of 0), so it also implements write zeroes. */
565 0 : return bdev_malloc_unmap(disk, mch->accel_channel, task,
566 0 : bdev_io->u.bdev.offset_blocks * block_size,
567 0 : bdev_io->u.bdev.num_blocks * block_size);
568 :
569 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
570 0 : if (bdev_io->u.bdev.zcopy.start) {
571 : void *buf;
572 : size_t len;
573 :
574 0 : buf = disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
575 0 : len = bdev_io->u.bdev.num_blocks * block_size;
576 0 : spdk_bdev_io_set_buf(bdev_io, buf, len);
577 0 : if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
578 0 : spdk_bdev_io_set_md_buf(bdev_io, malloc_get_md_buf(bdev_io),
579 : malloc_get_md_len(bdev_io));
580 : }
581 : }
582 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
583 0 : return 0;
584 0 : case SPDK_BDEV_IO_TYPE_ABORT:
585 0 : malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
586 0 : return 0;
587 0 : case SPDK_BDEV_IO_TYPE_COPY:
588 0 : bdev_malloc_copy(disk, mch->accel_channel, task,
589 0 : bdev_io->u.bdev.offset_blocks * block_size,
590 0 : bdev_io->u.bdev.copy.src_offset_blocks * block_size,
591 0 : bdev_io->u.bdev.num_blocks * block_size);
592 0 : return 0;
593 :
594 0 : default:
595 0 : return -1;
596 : }
597 : return 0;
598 : }
599 :
600 : static void
601 0 : bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
602 : {
603 0 : struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);
604 :
605 0 : if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
606 0 : malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
607 : SPDK_BDEV_IO_STATUS_FAILED);
608 : }
609 0 : }
610 :
611 : static bool
612 0 : bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
613 : {
614 0 : switch (io_type) {
615 0 : case SPDK_BDEV_IO_TYPE_READ:
616 : case SPDK_BDEV_IO_TYPE_WRITE:
617 : case SPDK_BDEV_IO_TYPE_FLUSH:
618 : case SPDK_BDEV_IO_TYPE_RESET:
619 : case SPDK_BDEV_IO_TYPE_UNMAP:
620 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
621 : case SPDK_BDEV_IO_TYPE_ZCOPY:
622 : case SPDK_BDEV_IO_TYPE_ABORT:
623 : case SPDK_BDEV_IO_TYPE_COPY:
624 0 : return true;
625 :
626 0 : default:
627 0 : return false;
628 : }
629 : }
630 :
631 : static struct spdk_io_channel *
632 0 : bdev_malloc_get_io_channel(void *ctx)
633 : {
634 0 : return spdk_get_io_channel(&g_malloc_disks);
635 : }
636 :
637 : static void
638 0 : bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
639 : {
640 0 : spdk_json_write_object_begin(w);
641 :
642 0 : spdk_json_write_named_string(w, "method", "bdev_malloc_create");
643 :
644 0 : spdk_json_write_named_object_begin(w, "params");
645 0 : spdk_json_write_named_string(w, "name", bdev->name);
646 0 : spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
647 0 : spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
648 0 : spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
649 0 : spdk_json_write_named_uuid(w, "uuid", &bdev->uuid);
650 0 : spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);
651 0 : spdk_json_write_named_uint32(w, "md_size", bdev->md_len);
652 0 : spdk_json_write_named_uint32(w, "dif_type", bdev->dif_type);
653 0 : spdk_json_write_named_bool(w, "dif_is_head_of_md", bdev->dif_is_head_of_md);
654 0 : spdk_json_write_named_uint32(w, "dif_pi_format", bdev->dif_pi_format);
655 :
656 0 : spdk_json_write_object_end(w);
657 :
658 0 : spdk_json_write_object_end(w);
659 0 : }
660 :
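     : /* Report every registered memory domain as supported, except when PI is enabled:
     :  * PI verification reads the data buffers directly (see _malloc_verify_pi()), so
     :  * memory domains are not claimed in that case. */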
661 : static int
662 0 : bdev_malloc_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
663 : {
664 0 : struct malloc_disk *malloc_disk = ctx;
665 : struct spdk_memory_domain *domain;
666 0 : int num_domains = 0;
667 :
668 0 : if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
669 0 : return 0;
670 : }
671 :
672 : /* Report support for every memory domain */
673 0 : for (domain = spdk_memory_domain_get_first(NULL); domain != NULL;
674 0 : domain = spdk_memory_domain_get_next(domain, NULL)) {
675 0 : if (domains != NULL && num_domains < array_size) {
676 0 : domains[num_domains] = domain;
677 : }
678 0 : num_domains++;
679 : }
680 :
681 0 : return num_domains;
682 : }
683 :
684 : static bool
685 0 : bdev_malloc_accel_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
686 : {
687 0 : switch (type) {
688 0 : case SPDK_BDEV_IO_TYPE_READ:
689 : case SPDK_BDEV_IO_TYPE_WRITE:
690 0 : return true;
691 0 : default:
692 0 : return false;
693 : }
694 : }
695 :
696 : static const struct spdk_bdev_fn_table malloc_fn_table = {
697 : .destruct = bdev_malloc_destruct,
698 : .submit_request = bdev_malloc_submit_request,
699 : .io_type_supported = bdev_malloc_io_type_supported,
700 : .get_io_channel = bdev_malloc_get_io_channel,
701 : .write_config_json = bdev_malloc_write_json_config,
702 : .get_memory_domains = bdev_malloc_get_memory_domains,
703 : .accel_sequence_supported = bdev_malloc_accel_sequence_supported,
704 : };
705 :
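     : /* Format the whole backing buffer with valid PI at creation time so that reads of
     :  * never-written blocks still verify cleanly. */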
706 : static int
707 0 : malloc_disk_setup_pi(struct malloc_disk *mdisk)
708 : {
709 0 : struct spdk_bdev *bdev = &mdisk->disk;
710 0 : struct spdk_dif_ctx dif_ctx;
711 0 : struct iovec iov, md_iov;
712 : uint32_t dif_check_flags;
713 : int rc;
714 0 : struct spdk_dif_ctx_init_ext_opts dif_opts;
715 :
716 0 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
717 0 : dif_opts.dif_pi_format = bdev->dif_pi_format;
718 : /* Write IGNORE values into the app tag and ref tag PI fields when formatting the newly created malloc bdev */
719 0 : dif_check_flags = bdev->dif_check_flags | SPDK_DIF_FLAGS_REFTAG_CHECK |
720 : SPDK_DIF_FLAGS_APPTAG_CHECK;
721 0 : rc = spdk_dif_ctx_init(&dif_ctx,
722 : bdev->blocklen,
723 : bdev->md_len,
724 0 : bdev->md_interleave,
725 0 : bdev->dif_is_head_of_md,
726 : bdev->dif_type,
727 : dif_check_flags,
728 : SPDK_DIF_REFTAG_IGNORE,
729 : 0xFFFF, SPDK_DIF_APPTAG_IGNORE,
730 : 0, 0, &dif_opts);
731 0 : if (rc != 0) {
732 0 : SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
733 0 : return rc;
734 : }
735 :
736 0 : iov.iov_base = mdisk->malloc_buf;
737 0 : iov.iov_len = bdev->blockcnt * bdev->blocklen;
738 :
739 0 : if (mdisk->disk.md_interleave) {
740 0 : rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
741 : } else {
742 0 : md_iov.iov_base = mdisk->malloc_md_buf;
743 0 : md_iov.iov_len = bdev->blockcnt * bdev->md_len;
744 :
745 0 : rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
746 : }
747 :
748 0 : if (rc != 0) {
749 0 : SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
750 : }
751 :
752 0 : return rc;
753 : }
754 :
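     : /*
     :  * Minimal usage sketch (illustrative only; the values are arbitrary and the caller
     :  * is assumed to run on an SPDK application thread). Unset opts fields remain zero,
     :  * which the validation below accepts:
     :  *
     :  *   struct malloc_bdev_opts opts = {};
     :  *   struct spdk_bdev *bdev;
     :  *   int rc;
     :  *
     :  *   opts.name = "Malloc0";            // optional; auto-generated when NULL
     :  *   opts.num_blocks = 8192;
     :  *   opts.block_size = 512;
     :  *   opts.physical_block_size = 512;
     :  *   rc = create_malloc_disk(&bdev, &opts);
     :  */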
755 : int
756 0 : create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
757 : {
758 : struct malloc_disk *mdisk;
759 : uint32_t block_size;
760 : int rc;
761 :
762 0 : assert(opts != NULL);
763 :
764 0 : if (opts->num_blocks == 0) {
765 0 : SPDK_ERRLOG("Disk num_blocks must be greater than 0");
766 0 : return -EINVAL;
767 : }
768 :
769 0 : if (opts->block_size % 512) {
770 0 : SPDK_ERRLOG("Data block size must be 512 bytes aligned\n");
771 0 : return -EINVAL;
772 : }
773 :
774 0 : if (opts->physical_block_size % 512) {
775 0 : SPDK_ERRLOG("Physical block must be 512 bytes aligned\n");
776 0 : return -EINVAL;
777 : }
778 :
779 0 : switch (opts->md_size) {
780 0 : case 0:
781 : case 8:
782 : case 16:
783 : case 32:
784 : case 64:
785 : case 128:
786 0 : break;
787 0 : default:
788 0 : SPDK_ERRLOG("metadata size %u is not supported\n", opts->md_size);
789 0 : return -EINVAL;
790 : }
791 :
792 0 : if (opts->md_interleave) {
793 0 : block_size = opts->block_size + opts->md_size;
794 : } else {
795 0 : block_size = opts->block_size;
796 : }
797 :
798 0 : mdisk = calloc(1, sizeof(*mdisk));
799 0 : if (!mdisk) {
800 0 : SPDK_ERRLOG("mdisk calloc() failed\n");
801 0 : return -ENOMEM;
802 : }
803 :
804 : /*
805 : * Allocate the large backend memory buffer from pinned memory.
806 : *
807 : * TODO: need to pass a hint so we know which socket to allocate
808 : * from on multi-socket systems.
809 : */
810 0 : mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * block_size, 2 * 1024 * 1024, NULL,
811 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
812 0 : if (!mdisk->malloc_buf) {
813 0 : SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
814 0 : malloc_disk_free(mdisk);
815 0 : return -ENOMEM;
816 : }
817 :
818 0 : if (!opts->md_interleave && opts->md_size != 0) {
819 0 : mdisk->malloc_md_buf = spdk_zmalloc(opts->num_blocks * opts->md_size, 2 * 1024 * 1024, NULL,
820 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
821 0 : if (!mdisk->malloc_md_buf) {
822 0 : SPDK_ERRLOG("malloc_md_buf spdk_zmalloc() failed\n");
823 0 : malloc_disk_free(mdisk);
824 0 : return -ENOMEM;
825 : }
826 : }
827 :
828 0 : if (opts->name) {
829 0 : mdisk->disk.name = strdup(opts->name);
830 : } else {
831 : /* Auto-generate a name */
832 0 : mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
833 0 : malloc_disk_count++;
834 : }
835 0 : if (!mdisk->disk.name) {
836 0 : malloc_disk_free(mdisk);
837 0 : return -ENOMEM;
838 : }
839 0 : mdisk->disk.product_name = "Malloc disk";
840 :
841 0 : mdisk->disk.write_cache = 1;
842 0 : mdisk->disk.blocklen = block_size;
843 0 : mdisk->disk.phys_blocklen = opts->physical_block_size;
844 0 : mdisk->disk.blockcnt = opts->num_blocks;
845 0 : mdisk->disk.md_len = opts->md_size;
846 0 : mdisk->disk.md_interleave = opts->md_interleave;
847 0 : mdisk->disk.dif_type = opts->dif_type;
848 0 : mdisk->disk.dif_is_head_of_md = opts->dif_is_head_of_md;
849 : /* Current block device layer API does not propagate
850 : * any DIF related information from user. So, we can
851 : * not generate or verify Application Tag.
852 : */
853 0 : switch (opts->dif_type) {
854 0 : case SPDK_DIF_TYPE1:
855 : case SPDK_DIF_TYPE2:
856 0 : mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
857 : SPDK_DIF_FLAGS_REFTAG_CHECK;
858 0 : break;
859 0 : case SPDK_DIF_TYPE3:
860 0 : mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK;
861 0 : break;
862 0 : case SPDK_DIF_DISABLE:
863 0 : break;
864 : }
865 0 : mdisk->disk.dif_pi_format = opts->dif_pi_format;
866 :
867 0 : if (opts->dif_type != SPDK_DIF_DISABLE) {
868 0 : rc = malloc_disk_setup_pi(mdisk);
869 0 : if (rc) {
870 0 : SPDK_ERRLOG("Failed to set up protection information.\n");
871 0 : malloc_disk_free(mdisk);
872 0 : return rc;
873 : }
874 : }
875 :
876 0 : if (opts->optimal_io_boundary) {
877 0 : mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
878 0 : mdisk->disk.split_on_optimal_io_boundary = true;
879 : }
880 0 : if (!spdk_uuid_is_null(&opts->uuid)) {
881 0 : spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
882 : }
883 :
884 0 : mdisk->disk.max_copy = 0;
885 0 : mdisk->disk.ctxt = mdisk;
886 0 : mdisk->disk.fn_table = &malloc_fn_table;
887 0 : mdisk->disk.module = &malloc_if;
888 :
889 0 : rc = spdk_bdev_register(&mdisk->disk);
890 0 : if (rc) {
891 0 : malloc_disk_free(mdisk);
892 0 : return rc;
893 : }
894 :
895 0 : *bdev = &(mdisk->disk);
896 :
897 0 : TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);
898 :
899 0 : return rc;
900 : }
901 :
902 : void
903 0 : delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
904 : {
905 : int rc;
906 :
907 0 : rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
908 0 : if (rc != 0) {
909 0 : cb_fn(cb_arg, rc);
910 : }
911 0 : }
912 :
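     : /* Poller that drains the channel's completed_tasks list and completes the
     :  * corresponding bdev IOs. */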
913 : static int
914 0 : malloc_completion_poller(void *ctx)
915 : {
916 0 : struct malloc_channel *ch = ctx;
917 : struct malloc_task *task;
918 0 : TAILQ_HEAD(, malloc_task) completed_tasks;
919 0 : uint32_t num_completions = 0;
920 :
921 0 : TAILQ_INIT(&completed_tasks);
922 0 : TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);
923 :
924 0 : while (!TAILQ_EMPTY(&completed_tasks)) {
925 0 : task = TAILQ_FIRST(&completed_tasks);
926 0 : TAILQ_REMOVE(&completed_tasks, task, tailq);
927 0 : spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
928 0 : num_completions++;
929 : }
930 :
931 0 : return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
932 : }
933 :
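     : /* Per-channel setup: acquire an accel IO channel and register the completion
     :  * poller. */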
934 : static int
935 0 : malloc_create_channel_cb(void *io_device, void *ctx)
936 : {
937 0 : struct malloc_channel *ch = ctx;
938 :
939 0 : ch->accel_channel = spdk_accel_get_io_channel();
940 0 : if (!ch->accel_channel) {
941 0 : SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
942 0 : return -ENOMEM;
943 : }
944 :
945 0 : ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
946 0 : if (!ch->completion_poller) {
947 0 : SPDK_ERRLOG("Failed to register malloc completion poller\n");
948 0 : spdk_put_io_channel(ch->accel_channel);
949 0 : return -ENOMEM;
950 : }
951 :
952 0 : TAILQ_INIT(&ch->completed_tasks);
953 :
954 0 : return 0;
955 : }
956 :
957 : static void
958 0 : malloc_destroy_channel_cb(void *io_device, void *ctx)
959 : {
960 0 : struct malloc_channel *ch = ctx;
961 :
962 0 : assert(TAILQ_EMPTY(&ch->completed_tasks));
963 :
964 0 : spdk_put_io_channel(ch->accel_channel);
965 0 : spdk_poller_unregister(&ch->completion_poller);
966 0 : }
967 :
968 : static int
969 0 : bdev_malloc_initialize(void)
970 : {
971 : /* This needs to be reset for each reinitialization of submodules.
972 : * Otherwise after enough devices or reinitializations the value gets too high.
973 : * TODO: Make malloc bdev name mandatory and remove this counter. */
974 0 : malloc_disk_count = 0;
975 :
976 0 : spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
977 : malloc_destroy_channel_cb, sizeof(struct malloc_channel),
978 : "bdev_malloc");
979 :
980 0 : return 0;
981 : }
982 :
983 : static void
984 0 : bdev_malloc_deinitialize(void)
985 : {
986 0 : spdk_io_device_unregister(&g_malloc_disks, NULL);
987 0 : }
988 :
989 0 : SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)