Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/bdev.h"
10 :
11 : #include "spdk/accel.h"
12 : #include "spdk/config.h"
13 : #include "spdk/env.h"
14 : #include "spdk/thread.h"
15 : #include "spdk/likely.h"
16 : #include "spdk/queue.h"
17 : #include "spdk/nvme_spec.h"
18 : #include "spdk/scsi_spec.h"
19 : #include "spdk/notify.h"
20 : #include "spdk/util.h"
21 : #include "spdk/trace.h"
22 : #include "spdk/dma.h"
23 :
24 : #include "spdk/bdev_module.h"
25 : #include "spdk/log.h"
26 : #include "spdk/string.h"
27 :
28 : #include "bdev_internal.h"
29 : #include "spdk_internal/trace_defs.h"
30 : #include "spdk_internal/assert.h"
31 :
32 : #ifdef SPDK_CONFIG_VTUNE
33 : #include "ittnotify.h"
34 : #include "ittnotify_types.h"
35 : int __itt_init_ittlib(const char *, __itt_group_id);
36 : #endif
37 :
38 : #define SPDK_BDEV_IO_POOL_SIZE (64 * 1024 - 1)
39 : #define SPDK_BDEV_IO_CACHE_SIZE 256
40 : #define SPDK_BDEV_AUTO_EXAMINE true
41 : #define BUF_SMALL_CACHE_SIZE 128
42 : #define BUF_LARGE_CACHE_SIZE 16
43 : #define NOMEM_THRESHOLD_COUNT 8
44 :
45 : #define SPDK_BDEV_QOS_TIMESLICE_IN_USEC 1000
46 : #define SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE 1
47 : #define SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE 512
48 : #define SPDK_BDEV_QOS_MIN_IOS_PER_SEC 1000
49 : #define SPDK_BDEV_QOS_MIN_BYTES_PER_SEC (1024 * 1024)
50 : #define SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC (UINT64_MAX / (1024 * 1024))
51 : #define SPDK_BDEV_QOS_LIMIT_NOT_DEFINED UINT64_MAX
52 : #define SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC 1000
53 :
54 : /* The maximum number of child requests generated at a time when splitting
55 : * an UNMAP or WRITE ZEROES command into child requests.
56 : */
57 : #define SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8)
58 : #define BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD 1000000
59 :
60 : /* The maximum number of child requests generated at a time when splitting
61 : * a COPY command into child requests.
62 : */
63 : #define SPDK_BDEV_MAX_CHILDREN_COPY_REQS (8)
64 :
65 : #define LOG_ALREADY_CLAIMED_ERROR(detail, bdev) \
66 : log_already_claimed(SPDK_LOG_ERROR, __LINE__, __func__, detail, bdev)
67 : #ifdef DEBUG
68 : #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) \
69 : log_already_claimed(SPDK_LOG_DEBUG, __LINE__, __func__, detail, bdev)
70 : #else
71 : #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) do {} while(0)
72 : #endif
73 :
74 : static void log_already_claimed(enum spdk_log_level level, const int line, const char *func,
75 : const char *detail, struct spdk_bdev *bdev);
76 :
77 : static const char *qos_rpc_type[] = {"rw_ios_per_sec",
78 : "rw_mbytes_per_sec", "r_mbytes_per_sec", "w_mbytes_per_sec"
79 : };
80 :
81 : TAILQ_HEAD(spdk_bdev_list, spdk_bdev);
82 :
83 : RB_HEAD(bdev_name_tree, spdk_bdev_name);
84 :
85 : static int
86 575 : bdev_name_cmp(struct spdk_bdev_name *name1, struct spdk_bdev_name *name2)
87 : {
88 575 : return strcmp(name1->name, name2->name);
89 : }
90 :
91 2118 : RB_GENERATE_STATIC(bdev_name_tree, spdk_bdev_name, node, bdev_name_cmp);
92 :
93 : struct spdk_bdev_mgr {
94 : struct spdk_mempool *bdev_io_pool;
95 :
96 : void *zero_buffer;
97 :
98 : TAILQ_HEAD(bdev_module_list, spdk_bdev_module) bdev_modules;
99 :
100 : struct spdk_bdev_list bdevs;
101 : struct bdev_name_tree bdev_names;
102 :
103 : bool init_complete;
104 : bool module_init_complete;
105 :
106 : struct spdk_spinlock spinlock;
107 :
108 : TAILQ_HEAD(, spdk_bdev_open_async_ctx) async_bdev_opens;
109 :
110 : #ifdef SPDK_CONFIG_VTUNE
111 : __itt_domain *domain;
112 : #endif
113 : };
114 :
115 : static struct spdk_bdev_mgr g_bdev_mgr = {
116 : .bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
117 : .bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
118 : .bdev_names = RB_INITIALIZER(g_bdev_mgr.bdev_names),
119 : .init_complete = false,
120 : .module_init_complete = false,
121 : .async_bdev_opens = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.async_bdev_opens),
122 : };
123 :
124 : static void
125 : __attribute__((constructor))
126 3 : _bdev_init(void)
127 : {
128 3 : spdk_spin_init(&g_bdev_mgr.spinlock);
129 3 : }
130 :
131 : typedef void (*lock_range_cb)(struct lba_range *range, void *ctx, int status);
132 :
133 : typedef void (*bdev_copy_bounce_buffer_cpl)(void *ctx, int rc);
134 :
135 : struct lba_range {
136 : struct spdk_bdev *bdev;
137 : uint64_t offset;
138 : uint64_t length;
139 : bool quiesce;
140 : void *locked_ctx;
141 : struct spdk_thread *owner_thread;
142 : struct spdk_bdev_channel *owner_ch;
143 : TAILQ_ENTRY(lba_range) tailq;
144 : TAILQ_ENTRY(lba_range) tailq_module;
145 : };
146 :
147 : static struct spdk_bdev_opts g_bdev_opts = {
148 : .bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
149 : .bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
150 : .bdev_auto_examine = SPDK_BDEV_AUTO_EXAMINE,
151 : .iobuf_small_cache_size = BUF_SMALL_CACHE_SIZE,
152 : .iobuf_large_cache_size = BUF_LARGE_CACHE_SIZE,
153 : };
154 :
155 : static spdk_bdev_init_cb g_init_cb_fn = NULL;
156 : static void *g_init_cb_arg = NULL;
157 :
158 : static spdk_bdev_fini_cb g_fini_cb_fn = NULL;
159 : static void *g_fini_cb_arg = NULL;
160 : static struct spdk_thread *g_fini_thread = NULL;
161 :
162 : struct spdk_bdev_qos_limit {
163 : /** IOs or bytes allowed per second (i.e., 1s). */
164 : uint64_t limit;
165 :
166 : /** Remaining IOs or bytes allowed in the current timeslice (e.g., 1ms).
167 : * The byte count is allowed to run negative if an I/O is submitted while
168 : * some bytes remain but the I/O is bigger than that amount. The
169 : * excess will be deducted from the next timeslice.
170 : */
171 : int64_t remaining_this_timeslice;
172 :
173 : /** Minimum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
174 : uint32_t min_per_timeslice;
175 :
176 : /** Maximum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
177 : uint32_t max_per_timeslice;
178 :
179 : /** Function to check whether to queue the IO.
180 : * If the IO is allowed to pass, the quota will be reduced correspondingly.
181 : */
182 : bool (*queue_io)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
183 :
184 : /** Function to rewind the quota once an IO was allowed to be sent by this
185 : * limit but then queued due to one of the subsequent limits.
186 : */
187 : void (*rewind_quota)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
188 : };
189 :
190 : struct spdk_bdev_qos {
191 : /** Rate limits, one per rate limit type. */
192 : struct spdk_bdev_qos_limit rate_limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
193 :
194 : /** The channel that all I/O are funneled through. */
195 : struct spdk_bdev_channel *ch;
196 :
197 : /** The thread on which the poller is running. */
198 : struct spdk_thread *thread;
199 :
200 : /** Size of a timeslice in tsc ticks. */
201 : uint64_t timeslice_size;
202 :
203 : /** Timestamp of start of last timeslice. */
204 : uint64_t last_timeslice;
205 :
206 : /** Poller that processes queued I/O commands each time slice. */
207 : struct spdk_poller *poller;
208 : };
209 :
210 : struct spdk_bdev_mgmt_channel {
211 : /*
212 : * Each thread keeps a cache of bdev_io - this allows
213 : * bdev threads which are *not* DPDK threads to still
214 : * benefit from a per-thread bdev_io cache. Without
215 : * this, non-DPDK threads fetching from the mempool
216 : * incur a cmpxchg on get and put.
217 : */
218 : bdev_io_stailq_t per_thread_cache;
219 : uint32_t per_thread_cache_count;
220 : uint32_t bdev_io_cache_size;
221 :
222 : struct spdk_iobuf_channel iobuf;
223 :
224 : TAILQ_HEAD(, spdk_bdev_shared_resource) shared_resources;
225 : TAILQ_HEAD(, spdk_bdev_io_wait_entry) io_wait_queue;
226 : };
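/* Editor's note: a minimal sketch, under assumptions, of how the per-thread
 * cache above avoids the mempool's atomic cmpxchg on the hot path. A get tries
 * the lock-free per-thread STAILQ first and falls back to the shared mempool
 * only when the cache is empty (the STAILQ link name is an assumption here):
 *
 *	struct spdk_bdev_io *bdev_io;
 *
 *	if (ch->per_thread_cache_count > 0) {
 *		bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
 *		STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
 *		ch->per_thread_cache_count--;
 *	} else {
 *		bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
 *	}
 */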
227 :
228 : /*
229 : * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
230 : * queue their IO awaiting retry here. This makes it possible to retry sending
231 : * IO to one bdev after IO from another bdev completes.
232 : */
233 : struct spdk_bdev_shared_resource {
234 : /* The bdev management channel */
235 : struct spdk_bdev_mgmt_channel *mgmt_ch;
236 :
237 : /*
238 : * Count of I/O submitted to bdev module and waiting for completion.
239 : * Incremented before submit_request() is called on an spdk_bdev_io.
240 : */
241 : uint64_t io_outstanding;
242 :
243 : /*
244 : * Queue of IO awaiting retry because of a previous NOMEM status returned
245 : * on this channel.
246 : */
247 : bdev_io_tailq_t nomem_io;
248 :
249 : /*
250 : * Threshold which io_outstanding must drop to before retrying nomem_io.
251 : */
252 : uint64_t nomem_threshold;
253 :
254 : /* I/O channel allocated by a bdev module */
255 : struct spdk_io_channel *shared_ch;
256 :
257 : struct spdk_poller *nomem_poller;
258 :
259 : /* Refcount of bdev channels using this resource */
260 : uint32_t ref;
261 :
262 : TAILQ_ENTRY(spdk_bdev_shared_resource) link;
263 : };
264 :
265 : #define BDEV_CH_RESET_IN_PROGRESS (1 << 0)
266 : #define BDEV_CH_QOS_ENABLED (1 << 1)
267 :
268 : struct spdk_bdev_channel {
269 : struct spdk_bdev *bdev;
270 :
271 : /* The channel for the underlying device */
272 : struct spdk_io_channel *channel;
273 :
274 : /* Accel channel */
275 : struct spdk_io_channel *accel_channel;
276 :
277 : /* Per io_device per thread data */
278 : struct spdk_bdev_shared_resource *shared_resource;
279 :
280 : struct spdk_bdev_io_stat *stat;
281 :
282 : /*
283 : * Count of I/O submitted to the underlying dev module through this channel
284 : * and waiting for completion.
285 : */
286 : uint64_t io_outstanding;
287 :
288 : /*
289 : * List of all submitted I/Os including I/O that are generated via splitting.
290 : */
291 : bdev_io_tailq_t io_submitted;
292 :
293 : /*
294 : * List of spdk_bdev_io that are currently queued because they write to a locked
295 : * LBA range.
296 : */
297 : bdev_io_tailq_t io_locked;
298 :
299 : /* List of I/Os with accel sequence being currently executed */
300 : bdev_io_tailq_t io_accel_exec;
301 :
302 : /* List of I/Os doing memory domain pull/push */
303 : bdev_io_tailq_t io_memory_domain;
304 :
305 : uint32_t flags;
306 :
307 : /* Counts number of bdev_io in the io_submitted TAILQ */
308 : uint16_t queue_depth;
309 :
310 : uint16_t trace_id;
311 :
312 : struct spdk_histogram_data *histogram;
313 :
314 : #ifdef SPDK_CONFIG_VTUNE
315 : uint64_t start_tsc;
316 : uint64_t interval_tsc;
317 : __itt_string_handle *handle;
318 : struct spdk_bdev_io_stat *prev_stat;
319 : #endif
320 :
321 : lba_range_tailq_t locked_ranges;
322 :
323 : /** List of I/Os queued by QoS. */
324 : bdev_io_tailq_t qos_queued_io;
325 : };
326 :
327 : struct media_event_entry {
328 : struct spdk_bdev_media_event event;
329 : TAILQ_ENTRY(media_event_entry) tailq;
330 : };
331 :
332 : #define MEDIA_EVENT_POOL_SIZE 64
333 :
334 : struct spdk_bdev_desc {
335 : struct spdk_bdev *bdev;
336 : bool write;
337 : bool memory_domains_supported;
338 : bool accel_sequence_supported[SPDK_BDEV_NUM_IO_TYPES];
339 : struct spdk_bdev_open_opts opts;
340 : struct spdk_thread *thread;
341 : struct {
342 : spdk_bdev_event_cb_t event_fn;
343 : void *ctx;
344 : } callback;
345 : bool closed;
346 : struct spdk_spinlock spinlock;
347 : uint32_t refs;
348 : TAILQ_HEAD(, media_event_entry) pending_media_events;
349 : TAILQ_HEAD(, media_event_entry) free_media_events;
350 : struct media_event_entry *media_events_buffer;
351 : TAILQ_ENTRY(spdk_bdev_desc) link;
352 :
353 : uint64_t timeout_in_sec;
354 : spdk_bdev_io_timeout_cb cb_fn;
355 : void *cb_arg;
356 : struct spdk_poller *io_timeout_poller;
357 : struct spdk_bdev_module_claim *claim;
358 : };
359 :
360 : struct spdk_bdev_iostat_ctx {
361 : struct spdk_bdev_io_stat *stat;
362 : enum spdk_bdev_reset_stat_mode reset_mode;
363 : spdk_bdev_get_device_stat_cb cb;
364 : void *cb_arg;
365 : };
366 :
367 : struct set_qos_limit_ctx {
368 : void (*cb_fn)(void *cb_arg, int status);
369 : void *cb_arg;
370 : struct spdk_bdev *bdev;
371 : };
372 :
373 : struct spdk_bdev_channel_iter {
374 : spdk_bdev_for_each_channel_msg fn;
375 : spdk_bdev_for_each_channel_done cpl;
376 : struct spdk_io_channel_iter *i;
377 : void *ctx;
378 : };
379 :
380 : struct spdk_bdev_io_error_stat {
381 : uint32_t error_status[-SPDK_MIN_BDEV_IO_STATUS];
382 : };
383 :
384 : enum bdev_io_retry_state {
385 : BDEV_IO_RETRY_STATE_INVALID,
386 : BDEV_IO_RETRY_STATE_PULL,
387 : BDEV_IO_RETRY_STATE_PULL_MD,
388 : BDEV_IO_RETRY_STATE_SUBMIT,
389 : BDEV_IO_RETRY_STATE_PUSH,
390 : BDEV_IO_RETRY_STATE_PUSH_MD,
391 : BDEV_IO_RETRY_STATE_GET_ACCEL_BUF,
392 : };
393 :
394 : #define __bdev_to_io_dev(bdev) (((char *)bdev) + 1)
395 : #define __bdev_from_io_dev(io_dev) ((struct spdk_bdev *)(((char *)io_dev) - 1))
396 : #define __io_ch_to_bdev_ch(io_ch) ((struct spdk_bdev_channel *)spdk_io_channel_get_ctx(io_ch))
397 : #define __io_ch_to_bdev_mgmt_ch(io_ch) ((struct spdk_bdev_mgmt_channel *)spdk_io_channel_get_ctx(io_ch))
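/* Editor's note on the +1 arithmetic above: spdk_io_device_register() keys
 * io_devices by address, and a bdev module may register the bdev pointer
 * itself as its own io_device, so the bdev layer offsets by one byte to get a
 * second, unique key that is still trivially reversible. A hedged sketch
 * (create/destroy callbacks elided):
 *
 *	spdk_io_device_register(__bdev_to_io_dev(bdev), ...,
 *				sizeof(struct spdk_bdev_channel), bdev->name);
 *	...
 *	struct spdk_bdev *bdev = __bdev_from_io_dev(io_dev);
 */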
398 :
399 : static inline void bdev_io_complete(void *ctx);
400 : static inline void bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io);
401 : static void bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io);
402 : static void bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io);
403 : static void _bdev_io_get_accel_buf(struct spdk_bdev_io *bdev_io);
404 :
405 : static void bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
406 : static int bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io);
407 :
408 : static void bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
409 : struct spdk_io_channel *ch, void *_ctx);
410 : static void bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status);
411 :
412 : static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
413 : struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
414 : uint64_t num_blocks,
415 : struct spdk_memory_domain *domain, void *domain_ctx,
416 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
417 : spdk_bdev_io_completion_cb cb, void *cb_arg);
418 : static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
419 : struct iovec *iov, int iovcnt, void *md_buf,
420 : uint64_t offset_blocks, uint64_t num_blocks,
421 : struct spdk_memory_domain *domain, void *domain_ctx,
422 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
423 : uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
424 : spdk_bdev_io_completion_cb cb, void *cb_arg);
425 :
426 : static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
427 : uint64_t offset, uint64_t length,
428 : lock_range_cb cb_fn, void *cb_arg);
429 :
430 : static int bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
431 : uint64_t offset, uint64_t length,
432 : lock_range_cb cb_fn, void *cb_arg);
433 :
434 : static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
435 : static bool bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *ch, struct spdk_bdev_io *bio_to_abort);
436 :
437 : static bool claim_type_is_v2(enum spdk_bdev_claim_type type);
438 : static void bdev_desc_release_claims(struct spdk_bdev_desc *desc);
439 : static void claim_reset(struct spdk_bdev *bdev);
440 :
441 : static void bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch);
442 :
443 : static bool bdev_io_should_split(struct spdk_bdev_io *bdev_io);
444 :
445 : #define bdev_get_ext_io_opt(opts, field, defval) \
446 : ((opts) != NULL ? SPDK_GET_FIELD(opts, field, defval) : (defval))
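/* Editor's note: a brief sketch of the macro above. SPDK_GET_FIELD() consults
 * the size member embedded in the ext-opts struct, so a field is read only if
 * the caller's compiled-in struct is new enough to contain it; otherwise the
 * default is returned. For example, with the memory_domain field of
 * struct spdk_bdev_ext_io_opts:
 *
 *	bdev_io->u.bdev.memory_domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
 */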
447 :
448 : static inline void
449 671 : bdev_ch_add_to_io_submitted(struct spdk_bdev_io *bdev_io)
450 : {
451 671 : TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
452 671 : bdev_io->internal.ch->queue_depth++;
453 671 : }
454 :
455 : static inline void
456 671 : bdev_ch_remove_from_io_submitted(struct spdk_bdev_io *bdev_io)
457 : {
458 671 : TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
459 671 : bdev_io->internal.ch->queue_depth--;
460 671 : }
461 :
462 : void
463 16 : spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
464 : {
465 16 : if (!opts) {
466 0 : SPDK_ERRLOG("opts should not be NULL\n");
467 0 : return;
468 : }
469 :
470 16 : if (!opts_size) {
471 0 : SPDK_ERRLOG("opts_size should not be zero value\n");
472 0 : return;
473 : }
474 :
475 16 : opts->opts_size = opts_size;
476 :
477 : #define SET_FIELD(field) \
478 : if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts_size) { \
479 : opts->field = g_bdev_opts.field; \
480 : } \
481 :
482 16 : SET_FIELD(bdev_io_pool_size);
483 16 : SET_FIELD(bdev_io_cache_size);
484 16 : SET_FIELD(bdev_auto_examine);
485 16 : SET_FIELD(iobuf_small_cache_size);
486 16 : SET_FIELD(iobuf_large_cache_size);
487 :
488 : /* Do not remove this statement. Always update it when adding a new field,
489 : * and do not forget to add the corresponding SET_FIELD statement for the new field. */
490 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
491 :
492 : #undef SET_FIELD
493 16 : }
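/* Editor's note: a minimal usage sketch of the opts_size-versioned pattern the
 * SET_FIELD copies above implement. Callers pass sizeof() of the struct they
 * were compiled against, so a newer library never writes past the end of an
 * older caller's struct:
 *
 *	struct spdk_bdev_opts opts;
 *
 *	spdk_bdev_get_opts(&opts, sizeof(opts));
 *	opts.bdev_io_pool_size = 8192;
 *	spdk_bdev_set_opts(&opts);
 */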
494 :
495 : int
496 17 : spdk_bdev_set_opts(struct spdk_bdev_opts *opts)
497 : {
498 : uint32_t min_pool_size;
499 :
500 17 : if (!opts) {
501 0 : SPDK_ERRLOG("opts cannot be NULL\n");
502 0 : return -1;
503 : }
504 :
505 17 : if (!opts->opts_size) {
506 1 : SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
507 1 : return -1;
508 : }
509 :
510 : /*
511 : * Add 1 to the thread count to account for the extra mgmt_ch that gets created during subsystem
512 : * initialization. A second mgmt_ch will be created on the same thread when the application starts
513 : * but before the deferred put_io_channel event is executed for the first mgmt_ch.
514 : */
515 16 : min_pool_size = opts->bdev_io_cache_size * (spdk_thread_get_count() + 1);
516 16 : if (opts->bdev_io_pool_size < min_pool_size) {
517 0 : SPDK_ERRLOG("bdev_io_pool_size %" PRIu32 " is not compatible with bdev_io_cache_size %" PRIu32
518 : " and %" PRIu32 " threads\n", opts->bdev_io_pool_size, opts->bdev_io_cache_size,
519 : spdk_thread_get_count());
520 0 : SPDK_ERRLOG("bdev_io_pool_size must be at least %" PRIu32 "\n", min_pool_size);
521 0 : return -1;
522 : }
523 :
524 : #define SET_FIELD(field) \
525 : if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
526 : g_bdev_opts.field = opts->field; \
527 : } \
528 :
529 16 : SET_FIELD(bdev_io_pool_size);
530 16 : SET_FIELD(bdev_io_cache_size);
531 16 : SET_FIELD(bdev_auto_examine);
532 16 : SET_FIELD(iobuf_small_cache_size);
533 16 : SET_FIELD(iobuf_large_cache_size);
534 :
535 16 : g_bdev_opts.opts_size = opts->opts_size;
536 :
537 : #undef SET_FIELD
538 :
539 16 : return 0;
540 17 : }
541 :
542 : static struct spdk_bdev *
543 155 : bdev_get_by_name(const char *bdev_name)
544 : {
545 : struct spdk_bdev_name find;
546 : struct spdk_bdev_name *res;
547 :
548 155 : find.name = (char *)bdev_name;
549 155 : res = RB_FIND(bdev_name_tree, &g_bdev_mgr.bdev_names, &find);
550 155 : if (res != NULL) {
551 148 : return res->bdev;
552 : }
553 :
554 7 : return NULL;
555 155 : }
556 :
557 : struct spdk_bdev *
558 19 : spdk_bdev_get_by_name(const char *bdev_name)
559 : {
560 : struct spdk_bdev *bdev;
561 :
562 19 : spdk_spin_lock(&g_bdev_mgr.spinlock);
563 19 : bdev = bdev_get_by_name(bdev_name);
564 19 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
565 :
566 19 : return bdev;
567 : }
568 :
569 : struct bdev_io_status_string {
570 : enum spdk_bdev_io_status status;
571 : const char *str;
572 : };
573 :
574 : static const struct bdev_io_status_string bdev_io_status_strings[] = {
575 : { SPDK_BDEV_IO_STATUS_AIO_ERROR, "aio_error" },
576 : { SPDK_BDEV_IO_STATUS_ABORTED, "aborted" },
577 : { SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED, "first_fused_failed" },
578 : { SPDK_BDEV_IO_STATUS_MISCOMPARE, "miscompare" },
579 : { SPDK_BDEV_IO_STATUS_NOMEM, "nomem" },
580 : { SPDK_BDEV_IO_STATUS_SCSI_ERROR, "scsi_error" },
581 : { SPDK_BDEV_IO_STATUS_NVME_ERROR, "nvme_error" },
582 : { SPDK_BDEV_IO_STATUS_FAILED, "failed" },
583 : { SPDK_BDEV_IO_STATUS_PENDING, "pending" },
584 : { SPDK_BDEV_IO_STATUS_SUCCESS, "success" },
585 : };
586 :
587 : static const char *
588 0 : bdev_io_status_get_string(enum spdk_bdev_io_status status)
589 : {
590 : uint32_t i;
591 :
592 0 : for (i = 0; i < SPDK_COUNTOF(bdev_io_status_strings); i++) {
593 0 : if (bdev_io_status_strings[i].status == status) {
594 0 : return bdev_io_status_strings[i].str;
595 : }
596 0 : }
597 :
598 0 : return "reserved";
599 0 : }
600 :
601 : struct spdk_bdev_wait_for_examine_ctx {
602 : struct spdk_poller *poller;
603 : spdk_bdev_wait_for_examine_cb cb_fn;
604 : void *cb_arg;
605 : };
606 :
607 : static bool bdev_module_all_actions_completed(void);
608 :
609 : static int
610 203 : bdev_wait_for_examine_cb(void *arg)
611 : {
612 203 : struct spdk_bdev_wait_for_examine_ctx *ctx = arg;
613 :
614 203 : if (!bdev_module_all_actions_completed()) {
615 0 : return SPDK_POLLER_IDLE;
616 : }
617 :
618 203 : spdk_poller_unregister(&ctx->poller);
619 203 : ctx->cb_fn(ctx->cb_arg);
620 203 : free(ctx);
621 :
622 203 : return SPDK_POLLER_BUSY;
623 203 : }
624 :
625 : int
626 203 : spdk_bdev_wait_for_examine(spdk_bdev_wait_for_examine_cb cb_fn, void *cb_arg)
627 : {
628 : struct spdk_bdev_wait_for_examine_ctx *ctx;
629 :
630 203 : ctx = calloc(1, sizeof(*ctx));
631 203 : if (ctx == NULL) {
632 0 : return -ENOMEM;
633 : }
634 203 : ctx->cb_fn = cb_fn;
635 203 : ctx->cb_arg = cb_arg;
636 203 : ctx->poller = SPDK_POLLER_REGISTER(bdev_wait_for_examine_cb, ctx, 0);
637 :
638 203 : return 0;
639 203 : }
640 :
641 : struct spdk_bdev_examine_item {
642 : char *name;
643 : TAILQ_ENTRY(spdk_bdev_examine_item) link;
644 : };
645 :
646 : TAILQ_HEAD(spdk_bdev_examine_allowlist, spdk_bdev_examine_item);
647 :
648 : struct spdk_bdev_examine_allowlist g_bdev_examine_allowlist = TAILQ_HEAD_INITIALIZER(
649 : g_bdev_examine_allowlist);
650 :
651 : static inline bool
652 24 : bdev_examine_allowlist_check(const char *name)
653 : {
654 : struct spdk_bdev_examine_item *item;
655 24 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
656 3 : if (strcmp(name, item->name) == 0) {
657 3 : return true;
658 : }
659 0 : }
660 21 : return false;
661 24 : }
662 :
663 : static inline void
664 258 : bdev_examine_allowlist_remove(const char *name)
665 : {
666 : struct spdk_bdev_examine_item *item;
667 258 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
668 3 : if (strcmp(name, item->name) == 0) {
669 3 : TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
670 3 : free(item->name);
671 3 : free(item);
672 3 : break;
673 : }
674 0 : }
675 258 : }
676 :
677 : static inline void
678 68 : bdev_examine_allowlist_free(void)
679 : {
680 : struct spdk_bdev_examine_item *item;
681 68 : while (!TAILQ_EMPTY(&g_bdev_examine_allowlist)) {
682 0 : item = TAILQ_FIRST(&g_bdev_examine_allowlist);
683 0 : TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
684 0 : free(item->name);
685 0 : free(item);
686 : }
687 68 : }
688 :
689 : static inline bool
690 12 : bdev_in_examine_allowlist(struct spdk_bdev *bdev)
691 : {
692 : struct spdk_bdev_alias *tmp;
693 12 : if (bdev_examine_allowlist_check(bdev->name)) {
694 3 : return true;
695 : }
696 18 : TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
697 9 : if (bdev_examine_allowlist_check(tmp->alias.name)) {
698 0 : return true;
699 : }
700 9 : }
701 9 : return false;
702 12 : }
703 :
704 : static inline bool
705 133 : bdev_ok_to_examine(struct spdk_bdev *bdev)
706 : {
707 : /* Some bdevs may not support the READ command.
708 : * Do not try to examine them.
709 : */
710 133 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ)) {
711 0 : return false;
712 : }
713 :
714 133 : if (g_bdev_opts.bdev_auto_examine) {
715 121 : return true;
716 : } else {
717 12 : return bdev_in_examine_allowlist(bdev);
718 : }
719 133 : }
720 :
721 : static void
722 133 : bdev_examine(struct spdk_bdev *bdev)
723 : {
724 : struct spdk_bdev_module *module;
725 : struct spdk_bdev_module_claim *claim, *tmpclaim;
726 : uint32_t action;
727 :
728 133 : if (!bdev_ok_to_examine(bdev)) {
729 9 : return;
730 : }
731 :
732 506 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
733 382 : if (module->examine_config) {
734 258 : spdk_spin_lock(&module->internal.spinlock);
735 258 : action = module->internal.action_in_progress;
736 258 : module->internal.action_in_progress++;
737 258 : spdk_spin_unlock(&module->internal.spinlock);
738 258 : module->examine_config(bdev);
739 258 : if (action != module->internal.action_in_progress) {
740 0 : SPDK_ERRLOG("examine_config for module %s did not call "
741 : "spdk_bdev_module_examine_done()\n", module->name);
742 0 : }
743 258 : }
744 382 : }
745 :
746 124 : spdk_spin_lock(&bdev->internal.spinlock);
747 :
748 124 : switch (bdev->internal.claim_type) {
749 : case SPDK_BDEV_CLAIM_NONE:
750 : /* Examine by all bdev modules */
751 466 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
752 350 : if (module->examine_disk) {
753 225 : spdk_spin_lock(&module->internal.spinlock);
754 225 : module->internal.action_in_progress++;
755 225 : spdk_spin_unlock(&module->internal.spinlock);
756 225 : spdk_spin_unlock(&bdev->internal.spinlock);
757 225 : module->examine_disk(bdev);
758 225 : spdk_spin_lock(&bdev->internal.spinlock);
759 225 : }
760 350 : }
761 116 : break;
762 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
763 : /* Examine by the one bdev module with a v1 claim */
764 1 : module = bdev->internal.claim.v1.module;
765 1 : if (module->examine_disk) {
766 1 : spdk_spin_lock(&module->internal.spinlock);
767 1 : module->internal.action_in_progress++;
768 1 : spdk_spin_unlock(&module->internal.spinlock);
769 1 : spdk_spin_unlock(&bdev->internal.spinlock);
770 1 : module->examine_disk(bdev);
771 1 : return;
772 : }
773 0 : break;
774 : default:
775 : /* Examine by all bdev modules with a v2 claim */
776 7 : assert(claim_type_is_v2(bdev->internal.claim_type));
777 : /*
778 : * Removal of tailq nodes while iterating can cause the iteration to jump out of the
779 : * list, perhaps accessing freed memory. Without protection, this could happen
780 : * while the lock is dropped during the examine callback.
781 : */
782 7 : bdev->internal.examine_in_progress++;
783 :
784 16 : TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
785 9 : module = claim->module;
786 :
787 9 : if (module == NULL) {
788 : /* This is a vestigial claim, held by examine_count */
789 0 : continue;
790 : }
791 :
792 9 : if (module->examine_disk == NULL) {
793 0 : continue;
794 : }
795 :
796 9 : spdk_spin_lock(&module->internal.spinlock);
797 9 : module->internal.action_in_progress++;
798 9 : spdk_spin_unlock(&module->internal.spinlock);
799 :
800 : /* Call examine_disk without holding internal.spinlock. */
801 9 : spdk_spin_unlock(&bdev->internal.spinlock);
802 9 : module->examine_disk(bdev);
803 9 : spdk_spin_lock(&bdev->internal.spinlock);
804 9 : }
805 :
806 7 : assert(bdev->internal.examine_in_progress > 0);
807 7 : bdev->internal.examine_in_progress--;
808 7 : if (bdev->internal.examine_in_progress == 0) {
809 : /* Remove any claims that were released during examine_disk */
810 16 : TAILQ_FOREACH_SAFE(claim, &bdev->internal.claim.v2.claims, link, tmpclaim) {
811 9 : if (claim->desc != NULL) {
812 9 : continue;
813 : }
814 :
815 0 : TAILQ_REMOVE(&bdev->internal.claim.v2.claims, claim, link);
816 0 : free(claim);
817 0 : }
818 7 : if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
819 0 : claim_reset(bdev);
820 0 : }
821 7 : }
822 7 : }
823 :
824 123 : spdk_spin_unlock(&bdev->internal.spinlock);
825 133 : }
826 :
827 : int
828 4 : spdk_bdev_examine(const char *name)
829 : {
830 : struct spdk_bdev *bdev;
831 : struct spdk_bdev_examine_item *item;
832 4 : struct spdk_thread *thread = spdk_get_thread();
833 :
834 4 : if (spdk_unlikely(!spdk_thread_is_app_thread(thread))) {
835 1 : SPDK_ERRLOG("Cannot examine bdev %s on thread %p (%s)\n", name, thread,
836 : thread ? spdk_thread_get_name(thread) : "null");
837 1 : return -EINVAL;
838 : }
839 :
840 3 : if (g_bdev_opts.bdev_auto_examine) {
841 0 : SPDK_ERRLOG("Manual examine is not allowed if auto examine is enabled\n");
842 0 : return -EINVAL;
843 : }
844 :
845 3 : if (bdev_examine_allowlist_check(name)) {
846 0 : SPDK_ERRLOG("Duplicate bdev name for manual examine: %s\n", name);
847 0 : return -EEXIST;
848 : }
849 :
850 3 : item = calloc(1, sizeof(*item));
851 3 : if (!item) {
852 0 : return -ENOMEM;
853 : }
854 3 : item->name = strdup(name);
855 3 : if (!item->name) {
856 0 : free(item);
857 0 : return -ENOMEM;
858 : }
859 3 : TAILQ_INSERT_TAIL(&g_bdev_examine_allowlist, item, link);
860 :
861 3 : bdev = spdk_bdev_get_by_name(name);
862 3 : if (bdev) {
863 3 : bdev_examine(bdev);
864 3 : }
865 3 : return 0;
866 4 : }
867 :
868 : static inline void
869 0 : bdev_examine_allowlist_config_json(struct spdk_json_write_ctx *w)
870 : {
871 : struct spdk_bdev_examine_item *item;
872 0 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
873 0 : spdk_json_write_object_begin(w);
874 0 : spdk_json_write_named_string(w, "method", "bdev_examine");
875 0 : spdk_json_write_named_object_begin(w, "params");
876 0 : spdk_json_write_named_string(w, "name", item->name);
877 0 : spdk_json_write_object_end(w);
878 0 : spdk_json_write_object_end(w);
879 0 : }
880 0 : }
881 :
882 : struct spdk_bdev *
883 1 : spdk_bdev_first(void)
884 : {
885 : struct spdk_bdev *bdev;
886 :
887 1 : bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
888 1 : if (bdev) {
889 1 : SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
890 1 : }
891 :
892 1 : return bdev;
893 : }
894 :
895 : struct spdk_bdev *
896 8 : spdk_bdev_next(struct spdk_bdev *prev)
897 : {
898 : struct spdk_bdev *bdev;
899 :
900 8 : bdev = TAILQ_NEXT(prev, internal.link);
901 8 : if (bdev) {
902 7 : SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
903 7 : }
904 :
905 8 : return bdev;
906 : }
907 :
908 : static struct spdk_bdev *
909 6 : _bdev_next_leaf(struct spdk_bdev *bdev)
910 : {
911 9 : while (bdev != NULL) {
912 8 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
913 5 : return bdev;
914 : } else {
915 3 : bdev = TAILQ_NEXT(bdev, internal.link);
916 : }
917 : }
918 :
919 1 : return bdev;
920 6 : }
921 :
922 : struct spdk_bdev *
923 1 : spdk_bdev_first_leaf(void)
924 : {
925 : struct spdk_bdev *bdev;
926 :
927 1 : bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));
928 :
929 1 : if (bdev) {
930 1 : SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
931 1 : }
932 :
933 1 : return bdev;
934 : }
935 :
936 : struct spdk_bdev *
937 5 : spdk_bdev_next_leaf(struct spdk_bdev *prev)
938 : {
939 : struct spdk_bdev *bdev;
940 :
941 5 : bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
942 :
943 5 : if (bdev) {
944 4 : SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
945 4 : }
946 :
947 5 : return bdev;
948 : }
949 :
950 : static inline bool
951 820 : bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
952 : {
953 820 : return bdev_io->internal.f.has_memory_domain;
954 : }
955 :
956 : static inline bool
957 1551 : bdev_io_use_accel_sequence(struct spdk_bdev_io *bdev_io)
958 : {
959 1551 : return bdev_io->internal.f.has_accel_sequence;
960 : }
961 :
962 : static inline uint32_t
963 373 : bdev_desc_get_block_size(struct spdk_bdev_desc *desc)
964 : {
965 373 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
966 :
967 373 : if (spdk_unlikely(desc->opts.hide_metadata)) {
968 0 : return bdev->blocklen - bdev->md_len;
969 : } else {
970 373 : return bdev->blocklen;
971 : }
972 373 : }
973 :
974 : static inline uint32_t
975 110 : bdev_io_get_block_size(struct spdk_bdev_io *bdev_io)
976 : {
977 110 : return bdev_desc_get_block_size(bdev_io->internal.desc);
978 : }
979 :
980 : static inline void
981 7 : bdev_queue_nomem_io_head(struct spdk_bdev_shared_resource *shared_resource,
982 : struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
983 : {
984 : /* Wait for some of the outstanding I/O to complete before we retry any of the nomem_io.
985 : * Normally we will wait for NOMEM_THRESHOLD_COUNT I/O to complete but for low queue depth
986 : * channels we will instead wait for half to complete.
987 : */
988 7 : shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
989 : (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
990 :
991 7 : assert(state != BDEV_IO_RETRY_STATE_INVALID);
992 7 : bdev_io->internal.retry_state = state;
993 7 : TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
994 7 : }
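/* Editor's note: a worked example of the threshold computed above. With
 * io_outstanding == 64, spdk_max(64 / 2, 64 - NOMEM_THRESHOLD_COUNT) =
 * spdk_max(32, 56) = 56, so retries begin after 8 completions; on a low queue
 * depth channel with io_outstanding == 10, spdk_max(5, 2) = 5, i.e. retries
 * begin after half of the outstanding I/O completes. */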
995 :
996 : static inline void
997 43 : bdev_queue_nomem_io_tail(struct spdk_bdev_shared_resource *shared_resource,
998 : struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
999 : {
1000 : /* We only queue IOs at the end of the nomem_io queue if they're submitted by the user while
1001 : * the queue isn't empty, so we don't need to update the nomem_threshold here */
1002 43 : assert(!TAILQ_EMPTY(&shared_resource->nomem_io));
1003 :
1004 43 : assert(state != BDEV_IO_RETRY_STATE_INVALID);
1005 43 : bdev_io->internal.retry_state = state;
1006 43 : TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, internal.link);
1007 43 : }
1008 :
1009 : void
1010 16 : spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
1011 : {
1012 : struct iovec *iovs;
1013 :
1014 16 : if (bdev_io->u.bdev.iovs == NULL) {
1015 3 : bdev_io->u.bdev.iovs = &bdev_io->iov;
1016 3 : bdev_io->u.bdev.iovcnt = 1;
1017 3 : }
1018 :
1019 16 : iovs = bdev_io->u.bdev.iovs;
1020 :
1021 16 : assert(iovs != NULL);
1022 16 : assert(bdev_io->u.bdev.iovcnt >= 1);
1023 :
1024 16 : iovs[0].iov_base = buf;
1025 16 : iovs[0].iov_len = len;
1026 16 : }
1027 :
1028 : void
1029 3 : spdk_bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
1030 : {
1031 3 : assert((len / spdk_bdev_get_md_size(bdev_io->bdev)) >= bdev_io->u.bdev.num_blocks);
1032 3 : bdev_io->u.bdev.md_buf = md_buf;
1033 3 : }
1034 :
1035 : static bool
1036 167 : _is_buf_allocated(const struct iovec *iovs)
1037 : {
1038 167 : if (iovs == NULL) {
1039 6 : return false;
1040 : }
1041 :
1042 161 : return iovs[0].iov_base != NULL;
1043 167 : }
1044 :
1045 : static bool
1046 50 : _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
1047 : {
1048 : int i;
1049 : uintptr_t iov_base;
1050 :
1051 50 : if (spdk_likely(alignment == 1)) {
1052 21 : return true;
1053 : }
1054 :
1055 36 : for (i = 0; i < iovcnt; i++) {
1056 29 : iov_base = (uintptr_t)iovs[i].iov_base;
1057 29 : if ((iov_base & (alignment - 1)) != 0) {
1058 22 : return false;
1059 : }
1060 7 : }
1061 :
1062 7 : return true;
1063 50 : }
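/* Editor's note: the (iov_base & (alignment - 1)) test above relies on the
 * alignment being a power of two. For example, with alignment == 512:
 * 0x201000 & 0x1FF == 0 (aligned), while 0x201034 & 0x1FF == 0x34
 * (misaligned, so a bounce buffer will be needed). */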
1064 :
1065 : static inline bool
1066 895 : bdev_io_needs_metadata(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
1067 : {
1068 895 : return desc->opts.hide_metadata && bdev_io->bdev->md_len != 0;
1069 : }
1070 :
1071 : static inline bool
1072 852 : bdev_io_needs_sequence_exec(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
1073 : {
1074 852 : if (!bdev_io_use_accel_sequence(bdev_io)) {
1075 852 : return false;
1076 : }
1077 :
1078 : /* For now, we don't allow splitting IOs with an accel sequence and will treat them as if
1079 : * bdev module didn't support accel sequences */
1080 0 : return !desc->accel_sequence_supported[bdev_io->type] || bdev_io->internal.f.split;
1081 852 : }
1082 :
1083 : static inline void
1084 592 : bdev_io_increment_outstanding(struct spdk_bdev_channel *bdev_ch,
1085 : struct spdk_bdev_shared_resource *shared_resource)
1086 : {
1087 592 : bdev_ch->io_outstanding++;
1088 592 : shared_resource->io_outstanding++;
1089 592 : }
1090 :
1091 : static inline void
1092 592 : bdev_io_decrement_outstanding(struct spdk_bdev_channel *bdev_ch,
1093 : struct spdk_bdev_shared_resource *shared_resource)
1094 : {
1095 592 : assert(bdev_ch->io_outstanding > 0);
1096 592 : assert(shared_resource->io_outstanding > 0);
1097 592 : bdev_ch->io_outstanding--;
1098 592 : shared_resource->io_outstanding--;
1099 592 : }
1100 :
1101 : static void
1102 0 : bdev_io_submit_sequence_cb(void *ctx, int status)
1103 : {
1104 0 : struct spdk_bdev_io *bdev_io = ctx;
1105 :
1106 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1107 :
1108 0 : bdev_io->u.bdev.accel_sequence = NULL;
1109 0 : bdev_io->internal.f.has_accel_sequence = false;
1110 :
1111 0 : if (spdk_unlikely(status != 0)) {
1112 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
1113 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1114 0 : bdev_io_complete_unsubmitted(bdev_io);
1115 0 : return;
1116 : }
1117 :
1118 0 : bdev_io_submit(bdev_io);
1119 0 : }
1120 :
1121 : static void
1122 0 : bdev_io_exec_sequence_cb(void *ctx, int status)
1123 : {
1124 0 : struct spdk_bdev_io *bdev_io = ctx;
1125 0 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1126 :
1127 0 : TAILQ_REMOVE(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1128 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1129 :
1130 0 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1131 0 : bdev_ch_retry_io(ch);
1132 0 : }
1133 :
1134 0 : bdev_io->internal.data_transfer_cpl(bdev_io, status);
1135 0 : }
1136 :
1137 : static void
1138 0 : bdev_io_exec_sequence(struct spdk_bdev_io *bdev_io, void (*cb_fn)(void *ctx, int status))
1139 : {
1140 0 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1141 :
1142 0 : assert(bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1143 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE || bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1144 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1145 :
1146 : /* Since the operations are appended during submission, they're in the opposite
1147 : * order from how we want to execute them for reads (i.e. the most recently added
1148 : * operation must execute first), so reverse the sequence before executing it.
1149 : */
1150 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1151 0 : spdk_accel_sequence_reverse(bdev_io->internal.accel_sequence);
1152 0 : }
1153 :
1154 0 : TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1155 0 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1156 0 : bdev_io->internal.data_transfer_cpl = cb_fn;
1157 :
1158 0 : spdk_accel_sequence_finish(bdev_io->internal.accel_sequence,
1159 0 : bdev_io_exec_sequence_cb, bdev_io);
1160 0 : }
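/* Editor's note: an abstract illustration of the reversal above. If op A is
 * appended first and op B second while a read descends the stack, the raw data
 * must be transformed by B before A on completion, so the sequence [A, B] is
 * reversed to [B, A] before spdk_accel_sequence_finish() executes it. */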
1161 :
1162 : static void
1163 42 : bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, bool status)
1164 : {
1165 42 : struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1166 : void *buf;
1167 :
1168 42 : if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1169 0 : buf = bdev_io->internal.buf.ptr;
1170 0 : bdev_io->internal.buf.ptr = NULL;
1171 0 : bdev_io->internal.f.has_buf = false;
1172 0 : bdev_io->internal.get_aux_buf_cb(ch, bdev_io, buf);
1173 0 : bdev_io->internal.get_aux_buf_cb = NULL;
1174 0 : } else {
1175 42 : assert(bdev_io->internal.get_buf_cb != NULL);
1176 42 : bdev_io->internal.get_buf_cb(ch, bdev_io, status);
1177 42 : bdev_io->internal.get_buf_cb = NULL;
1178 : }
1179 42 : }
1180 :
1181 : static void
1182 4 : _bdev_io_pull_buffer_cpl(void *ctx, int rc)
1183 : {
1184 4 : struct spdk_bdev_io *bdev_io = ctx;
1185 :
1186 4 : if (rc) {
1187 0 : SPDK_ERRLOG("Set bounce buffer failed with rc %d\n", rc);
1188 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1189 0 : }
1190 4 : bdev_io_get_buf_complete(bdev_io, !rc);
1191 4 : }
1192 :
1193 : static void
1194 2 : bdev_io_pull_md_buf_done(void *ctx, int status)
1195 : {
1196 2 : struct spdk_bdev_io *bdev_io = ctx;
1197 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1198 :
1199 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1200 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1201 :
1202 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1203 0 : bdev_ch_retry_io(ch);
1204 0 : }
1205 :
1206 2 : assert(bdev_io->internal.data_transfer_cpl);
1207 2 : bdev_io->internal.data_transfer_cpl(bdev_io, status);
1208 2 : }
1209 :
1210 : static void
1211 4 : bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
1212 : {
1213 4 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1214 4 : int rc = 0;
1215 :
1216 4 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1217 2 : assert(bdev_io->internal.f.has_bounce_buf);
1218 2 : if (bdev_io_use_memory_domain(bdev_io)) {
1219 2 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1220 2 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1221 4 : rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1222 2 : bdev_io->internal.memory_domain_ctx,
1223 2 : &bdev_io->internal.bounce_buf.orig_md_iov, 1,
1224 2 : &bdev_io->internal.bounce_buf.md_iov, 1,
1225 2 : bdev_io_pull_md_buf_done, bdev_io);
1226 2 : if (rc == 0) {
1227 : /* Continue to submit IO in completion callback */
1228 2 : return;
1229 : }
1230 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1231 0 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1232 0 : if (rc != -ENOMEM) {
1233 0 : SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
1234 : spdk_memory_domain_get_dma_device_id(
1235 : bdev_io->internal.memory_domain), rc);
1236 0 : }
1237 0 : } else {
1238 0 : memcpy(bdev_io->internal.bounce_buf.md_iov.iov_base,
1239 0 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base,
1240 0 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
1241 : }
1242 0 : }
1243 :
1244 2 : if (spdk_unlikely(rc == -ENOMEM)) {
1245 0 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL_MD);
1246 0 : } else {
1247 2 : assert(bdev_io->internal.data_transfer_cpl);
1248 2 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1249 : }
1250 4 : }
1251 :
1252 : static void
1253 4 : _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
1254 : {
1255 4 : assert(bdev_io->internal.f.has_bounce_buf);
1256 :
1257 : /* save original md_buf */
1258 4 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
1259 4 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len = len;
1260 4 : bdev_io->internal.bounce_buf.md_iov.iov_base = md_buf;
1261 4 : bdev_io->internal.bounce_buf.md_iov.iov_len = len;
1262 : /* set bounce md_buf */
1263 4 : bdev_io->u.bdev.md_buf = md_buf;
1264 :
1265 4 : bdev_io_pull_md_buf(bdev_io);
1266 4 : }
1267 :
1268 : static void
1269 42 : _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
1270 : {
1271 42 : struct spdk_bdev *bdev = bdev_io->bdev;
1272 : uint64_t md_len;
1273 : void *buf;
1274 :
1275 42 : if (spdk_bdev_is_md_separate(bdev)) {
1276 7 : assert(!bdev_io_use_accel_sequence(bdev_io));
1277 :
1278 7 : buf = (char *)bdev_io->u.bdev.iovs[0].iov_base + bdev_io->u.bdev.iovs[0].iov_len;
1279 7 : md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
1280 :
1281 7 : assert(((uintptr_t)buf & (spdk_bdev_get_buf_align(bdev) - 1)) == 0);
1282 :
1283 7 : if (bdev_io->u.bdev.md_buf != NULL) {
1284 4 : _bdev_io_pull_bounce_md_buf(bdev_io, buf, md_len);
1285 4 : return;
1286 : } else {
1287 3 : spdk_bdev_io_set_md_buf(bdev_io, buf, md_len);
1288 : }
1289 3 : }
1290 :
1291 38 : bdev_io_get_buf_complete(bdev_io, true);
1292 42 : }
1293 :
1294 : static inline void
1295 26 : bdev_io_pull_data_done(struct spdk_bdev_io *bdev_io, int rc)
1296 : {
1297 26 : if (rc) {
1298 0 : SPDK_ERRLOG("Failed to get data buffer\n");
1299 0 : assert(bdev_io->internal.data_transfer_cpl);
1300 0 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1301 0 : return;
1302 : }
1303 :
1304 26 : _bdev_io_set_md_buf(bdev_io);
1305 26 : }
1306 :
1307 : static void
1308 2 : bdev_io_pull_data_done_and_track(void *ctx, int status)
1309 : {
1310 2 : struct spdk_bdev_io *bdev_io = ctx;
1311 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1312 :
1313 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1314 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1315 :
1316 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1317 0 : bdev_ch_retry_io(ch);
1318 0 : }
1319 :
1320 2 : bdev_io_pull_data_done(bdev_io, status);
1321 2 : }
1322 :
1323 : static void
1324 27 : bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
1325 : {
1326 27 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1327 27 : struct spdk_bdev_desc *desc = bdev_io->internal.desc;
1328 27 : int rc = 0;
1329 :
1330 27 : assert(bdev_io->internal.f.has_bounce_buf);
1331 :
1332 27 : if (bdev_io_needs_metadata(desc, bdev_io)) {
1333 0 : assert(bdev_io->bdev->md_interleave);
1334 :
1335 0 : if (!bdev_io_use_accel_sequence(bdev_io)) {
1336 0 : bdev_io->internal.accel_sequence = NULL;
1337 0 : }
1338 :
1339 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1340 0 : rc = spdk_accel_append_dif_generate_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1341 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1342 0 : bdev_io->u.bdev.memory_domain,
1343 0 : bdev_io->u.bdev.memory_domain_ctx,
1344 0 : bdev_io->internal.bounce_buf.orig_iovs,
1345 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1346 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1347 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1348 0 : bdev_io->u.bdev.num_blocks,
1349 0 : &bdev_io->u.bdev.dif_ctx,
1350 : NULL, NULL);
1351 0 : } else {
1352 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1353 0 : rc = spdk_accel_append_dif_verify_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1354 0 : bdev_io->internal.bounce_buf.orig_iovs,
1355 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1356 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1357 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1358 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1359 0 : bdev_io->u.bdev.memory_domain,
1360 0 : bdev_io->u.bdev.memory_domain_ctx,
1361 0 : bdev_io->u.bdev.num_blocks,
1362 0 : &bdev_io->u.bdev.dif_ctx,
1363 0 : &bdev_io->u.bdev.dif_err,
1364 : NULL, NULL);
1365 : }
1366 :
1367 0 : if (spdk_likely(rc == 0)) {
1368 0 : bdev_io->internal.f.has_accel_sequence = true;
1369 0 : bdev_io->u.bdev.accel_sequence = bdev_io->internal.accel_sequence;
1370 0 : } else if (rc != -ENOMEM) {
1371 0 : SPDK_ERRLOG("Failed to append generate/verify_copy to accel sequence: %p\n",
1372 : bdev_io->internal.accel_sequence);
1373 0 : }
1374 27 : } else if (bdev_io_needs_sequence_exec(desc, bdev_io) ||
1375 27 : (bdev_io_use_accel_sequence(bdev_io) && bdev_io_use_memory_domain(bdev_io))) {
1376 : /* If we need to exec an accel sequence, or the IO uses a memory domain buffer and has a
1377 : * sequence, append a copy operation so that accel changes the src/dst buffers of the
1378 : * previous operation */
1379 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1380 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1381 0 : rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1382 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1383 : NULL, NULL,
1384 0 : bdev_io->internal.bounce_buf.orig_iovs,
1385 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1386 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1387 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1388 : NULL, NULL);
1389 0 : } else {
1390 : /* We need to reverse the src/dst for reads */
1391 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1392 0 : rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1393 0 : bdev_io->internal.bounce_buf.orig_iovs,
1394 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1395 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1396 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1397 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1398 : NULL, NULL, NULL, NULL);
1399 : }
1400 :
1401 0 : if (spdk_unlikely(rc != 0 && rc != -ENOMEM)) {
1402 0 : SPDK_ERRLOG("Failed to append copy to accel sequence: %p\n",
1403 : bdev_io->internal.accel_sequence);
1404 0 : }
1405 27 : } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1406 : /* if this is write path, copy data from original buffer to bounce buffer */
1407 17 : if (bdev_io_use_memory_domain(bdev_io)) {
1408 3 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1409 3 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1410 6 : rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1411 3 : bdev_io->internal.memory_domain_ctx,
1412 3 : bdev_io->internal.bounce_buf.orig_iovs,
1413 3 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1414 3 : bdev_io->u.bdev.iovs, 1,
1415 : bdev_io_pull_data_done_and_track,
1416 3 : bdev_io);
1417 3 : if (rc == 0) {
1418 : /* Continue to submit IO in completion callback */
1419 2 : return;
1420 : }
1421 1 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1422 1 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1423 1 : if (rc != -ENOMEM) {
1424 0 : SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
1425 : spdk_memory_domain_get_dma_device_id(
1426 : bdev_io->internal.memory_domain));
1427 0 : }
1428 1 : } else {
1429 14 : assert(bdev_io->u.bdev.iovcnt == 1);
1430 28 : spdk_copy_iovs_to_buf(bdev_io->u.bdev.iovs[0].iov_base,
1431 14 : bdev_io->u.bdev.iovs[0].iov_len,
1432 14 : bdev_io->internal.bounce_buf.orig_iovs,
1433 14 : bdev_io->internal.bounce_buf.orig_iovcnt);
1434 : }
1435 15 : }
1436 :
1437 25 : if (spdk_unlikely(rc == -ENOMEM)) {
1438 1 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1439 1 : } else {
1440 24 : bdev_io_pull_data_done(bdev_io, rc);
1441 : }
1442 27 : }
1443 :
1444 : static void
1445 26 : _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
1446 : bdev_copy_bounce_buffer_cpl cpl_cb)
1447 : {
1448 26 : struct spdk_bdev_shared_resource *shared_resource = bdev_io->internal.ch->shared_resource;
1449 :
1450 26 : assert(bdev_io->internal.f.has_bounce_buf == false);
1451 :
1452 26 : bdev_io->internal.data_transfer_cpl = cpl_cb;
1453 26 : bdev_io->internal.f.has_bounce_buf = true;
1454 : /* save original iovec */
1455 26 : bdev_io->internal.bounce_buf.orig_iovs = bdev_io->u.bdev.iovs;
1456 26 : bdev_io->internal.bounce_buf.orig_iovcnt = bdev_io->u.bdev.iovcnt;
1457 : /* zero the other data members */
1458 26 : bdev_io->internal.bounce_buf.iov.iov_base = NULL;
1459 26 : bdev_io->internal.bounce_buf.md_iov.iov_base = NULL;
1460 26 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base = NULL;
1461 : /* set bounce iov */
1462 26 : bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_buf.iov;
1463 26 : bdev_io->u.bdev.iovcnt = 1;
1464 : /* set bounce buffer for this operation */
1465 26 : bdev_io->u.bdev.iovs[0].iov_base = buf;
1466 26 : bdev_io->u.bdev.iovs[0].iov_len = len;
1467 : /* Now we use 1 iov, the split condition could have been changed */
1468 26 : bdev_io->internal.f.split = bdev_io_should_split(bdev_io);
1469 :
1470 26 : if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1471 0 : bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1472 0 : } else {
1473 26 : bdev_io_pull_data(bdev_io);
1474 : }
1475 26 : }
1476 :
1477 : static void
1478 42 : _bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t len)
1479 : {
1480 42 : struct spdk_bdev *bdev = bdev_io->bdev;
1481 : bool buf_allocated;
1482 : uint64_t alignment;
1483 : void *aligned_buf;
1484 :
1485 42 : bdev_io->internal.buf.ptr = buf;
1486 42 : bdev_io->internal.f.has_buf = true;
1487 :
1488 42 : if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1489 0 : bdev_io_get_buf_complete(bdev_io, true);
1490 0 : return;
1491 : }
1492 :
1493 42 : alignment = spdk_bdev_get_buf_align(bdev);
1494 42 : buf_allocated = _is_buf_allocated(bdev_io->u.bdev.iovs);
1495 42 : aligned_buf = (void *)(((uintptr_t)buf + (alignment - 1)) & ~(alignment - 1));
1496 :
1497 42 : if (buf_allocated) {
1498 26 : _bdev_io_pull_bounce_data_buf(bdev_io, aligned_buf, len, _bdev_io_pull_buffer_cpl);
1499 : /* Continue in completion callback */
1500 26 : return;
1501 : } else {
1502 16 : spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
1503 : }
1504 :
1505 16 : _bdev_io_set_md_buf(bdev_io);
1506 42 : }
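/* Editor's note: the aligned_buf computation above is the usual round-up-to-a-
 * power-of-two idiom. For example, buf == 0x1001 with alignment == 64 yields
 * (0x1001 + 63) & ~63 == 0x1040. */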
1507 :
1508 : static inline uint64_t
1509 42 : bdev_io_get_max_buf_len(struct spdk_bdev_io *bdev_io, uint64_t len)
1510 : {
1511 42 : struct spdk_bdev *bdev = bdev_io->bdev;
1512 : uint64_t md_len, alignment;
1513 :
1514 42 : md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
1515 :
1516 : /* 1-byte alignment needs 0 bytes of extra space, 64-byte alignment needs 63 bytes of extra space, etc. */
1517 42 : alignment = spdk_bdev_get_buf_align(bdev) - 1;
1518 :
1519 42 : return len + alignment + md_len;
1520 : }
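/* Editor's note: a worked example of the length above, under assumed values:
 * a 4 KiB read (8 blocks of 512 bytes) on a bdev with 512-byte buffer
 * alignment and 8 bytes of separate metadata per block requests
 * 4096 + (512 - 1) + 8 * 8 = 4671 bytes from the iobuf pool. */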
1521 :
1522 : static void
1523 42 : bdev_io_put_accel_buf(struct spdk_bdev_io *bdev_io)
1524 : {
1525 42 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1526 :
1527 84 : spdk_accel_put_buf(ch->accel_channel,
1528 42 : bdev_io->internal.buf.ptr,
1529 42 : bdev_io->u.bdev.memory_domain,
1530 42 : bdev_io->u.bdev.memory_domain_ctx);
1531 42 : }
1532 :
1533 : static void
1534 0 : _bdev_io_put_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t buf_len)
1535 : {
1536 : struct spdk_bdev_mgmt_channel *ch;
1537 :
1538 0 : ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1539 0 : spdk_iobuf_put(&ch->iobuf, buf, bdev_io_get_max_buf_len(bdev_io, buf_len));
1540 0 : }
1541 :
1542 : static void
1543 42 : bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
1544 : {
1545 42 : assert(bdev_io->internal.f.has_buf);
1546 :
1547 42 : if (bdev_io->u.bdev.memory_domain == spdk_accel_get_memory_domain()) {
1548 42 : bdev_io_put_accel_buf(bdev_io);
1549 42 : } else {
1550 0 : assert(bdev_io->u.bdev.memory_domain == NULL);
1551 0 : _bdev_io_put_buf(bdev_io, bdev_io->internal.buf.ptr,
1552 0 : bdev_io->internal.buf.len);
1553 : }
1554 42 : bdev_io->internal.buf.ptr = NULL;
1555 42 : bdev_io->internal.f.has_buf = false;
1556 42 : }
1557 :
1558 3 : SPDK_LOG_DEPRECATION_REGISTER(spdk_bdev_io_put_aux_buf,
1559 : "spdk_bdev_io_put_aux_buf is deprecated", "v25.01", 0);
1560 :
1561 : void
1562 0 : spdk_bdev_io_put_aux_buf(struct spdk_bdev_io *bdev_io, void *buf)
1563 : {
1564 0 : uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1565 :
1566 0 : SPDK_LOG_DEPRECATED(spdk_bdev_io_put_aux_buf);
1567 :
1568 0 : assert(buf != NULL);
1569 0 : _bdev_io_put_buf(bdev_io, buf, len);
1570 0 : }
1571 :
1572 : static inline void
1573 549 : bdev_submit_request(struct spdk_bdev *bdev, struct spdk_io_channel *ioch,
1574 : struct spdk_bdev_io *bdev_io)
1575 : {
1576 : /* After a request is submitted to a bdev module, the ownership of an accel sequence
1577 : * associated with that bdev_io is transferred to the bdev module. So, clear the internal
1578 : * sequence pointer to make sure we won't touch it anymore. */
1579 1016 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE ||
1580 549 : bdev_io->type == SPDK_BDEV_IO_TYPE_READ) && bdev_io->u.bdev.accel_sequence != NULL) {
1581 0 : assert(!bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1582 0 : bdev_io->internal.f.has_accel_sequence = false;
1583 0 : }
1584 :
1585 549 : bdev->fn_table->submit_request(ioch, bdev_io);
1586 549 : }
1587 :
1588 : static inline void
1589 10 : bdev_ch_resubmit_io(struct spdk_bdev_shared_resource *shared_resource, struct spdk_bdev_io *bdev_io)
1590 : {
1591 10 : struct spdk_bdev *bdev = bdev_io->bdev;
1592 :
1593 10 : bdev_io_increment_outstanding(bdev_io->internal.ch, shared_resource);
1594 10 : bdev_io->internal.error.nvme.cdw0 = 0;
1595 10 : bdev_io->num_retries++;
1596 10 : bdev_submit_request(bdev, spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
1597 10 : }
1598 :
1599 : static void
1600 63 : bdev_shared_ch_retry_io(struct spdk_bdev_shared_resource *shared_resource)
1601 : {
1602 : struct spdk_bdev_io *bdev_io;
1603 :
1604 63 : if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
1605 : /*
1606 : * Allow some more I/O to complete before retrying the nomem_io queue.
1607 : * Some drivers (such as nvme) cannot immediately take a new I/O in
1608 : * the context of a completion, because the resources for the I/O are
1609 : * not released until control returns to the bdev poller. Also, we
1610 : * may require several small I/O to complete before a larger I/O
1611 : * (that requires splitting) can be submitted.
1612 : */
1613 58 : return;
1614 : }
1615 :
1616 16 : while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1617 12 : bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
1618 12 : TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, internal.link);
1619 :
1620 12 : switch (bdev_io->internal.retry_state) {
1621 : case BDEV_IO_RETRY_STATE_SUBMIT:
1622 10 : bdev_ch_resubmit_io(shared_resource, bdev_io);
1623 10 : break;
1624 : case BDEV_IO_RETRY_STATE_PULL:
1625 1 : bdev_io_pull_data(bdev_io);
1626 1 : break;
1627 : case BDEV_IO_RETRY_STATE_PULL_MD:
1628 0 : bdev_io_pull_md_buf(bdev_io);
1629 0 : break;
1630 : case BDEV_IO_RETRY_STATE_PUSH:
1631 1 : bdev_io_push_bounce_data(bdev_io);
1632 1 : break;
1633 : case BDEV_IO_RETRY_STATE_PUSH_MD:
1634 0 : bdev_io_push_bounce_md_buf(bdev_io);
1635 0 : break;
1636 : case BDEV_IO_RETRY_STATE_GET_ACCEL_BUF:
1637 0 : _bdev_io_get_accel_buf(bdev_io);
1638 0 : break;
1639 : default:
1640 0 : assert(0 && "invalid retry state");
1641 : break;
1642 : }
1643 :
1644 12 : if (bdev_io == TAILQ_FIRST(&shared_resource->nomem_io)) {
1645 : /* This IO completed again with NOMEM status, so break the loop and
1646 : * don't try anymore. Note that a bdev_io that fails with NOMEM
1647 : * always gets requeued at the front of the list, to maintain
1648 : * ordering.
1649 : */
1650 1 : break;
1651 : }
1652 : }
1653 63 : }
1654 :
1655 : static void
1656 63 : bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
1657 : {
1658 63 : bdev_shared_ch_retry_io(bdev_ch->shared_resource);
1659 63 : }
1660 :
1661 : static int
1662 0 : bdev_no_mem_poller(void *ctx)
1663 : {
1664 0 : struct spdk_bdev_shared_resource *shared_resource = ctx;
1665 :
1666 0 : spdk_poller_unregister(&shared_resource->nomem_poller);
1667 :
1668 0 : if (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1669 0 : bdev_shared_ch_retry_io(shared_resource);
1670 0 : }
 1671 : /* The retry cb may re-register the poller, so double-check. */
1672 0 : if (!TAILQ_EMPTY(&shared_resource->nomem_io) &&
1673 0 : shared_resource->io_outstanding == 0 && shared_resource->nomem_poller == NULL) {
1674 : /* No IOs were submitted, try again */
1675 0 : shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1676 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1677 0 : }
1678 :
1679 0 : return SPDK_POLLER_BUSY;
1680 : }
1681 :
1682 : static inline bool
1683 556 : _bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
1684 : {
1685 556 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
1686 556 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1687 :
1688 556 : if (spdk_unlikely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM)) {
1689 5 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
1690 5 : bdev_queue_nomem_io_head(shared_resource, bdev_io, state);
1691 :
1692 5 : if (shared_resource->io_outstanding == 0 && !shared_resource->nomem_poller) {
 1693 : /* Special case: there are nomem IOs but no outstanding IOs whose completions
 1694 : * could trigger a retry of the queued IOs.
 1695 : * Normally any submitted IO may trigger such a retry; this poller handles the
 1696 : * case when no new IOs are submitted at all, e.g. qd==1. */
1697 0 : shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1698 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1699 0 : }
1700 : /* If bdev module completed an I/O that has an accel sequence with NOMEM status, the
1701 : * ownership of that sequence is transferred back to the bdev layer, so we need to
1702 : * restore internal.accel_sequence to make sure that the sequence is handled
1703 : * correctly in case the I/O is later aborted. */
1704 5 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
1705 5 : bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) && bdev_io->u.bdev.accel_sequence) {
1706 0 : assert(!bdev_io_use_accel_sequence(bdev_io));
1707 0 : bdev_io->internal.f.has_accel_sequence = true;
1708 0 : bdev_io->internal.accel_sequence = bdev_io->u.bdev.accel_sequence;
1709 0 : }
1710 :
1711 5 : return true;
1712 : }
1713 :
1714 551 : if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1715 63 : bdev_ch_retry_io(bdev_ch);
1716 63 : }
1717 :
1718 551 : return false;
1719 556 : }
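     :
     : /* Illustrative sketch (not part of bdev.c): the NOMEM path above is driven
     :  * by bdev modules completing an I/O with SPDK_BDEV_IO_STATUS_NOMEM when a
     :  * transient resource shortage occurs. A hypothetical module might do:
     :  *
     :  *	static void
     :  *	my_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
     :  *	{
     :  *		if (my_alloc_internal_req(ch) == NULL) {
     :  *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
     :  *			return;
     :  *		}
     :  *		...
     :  *	}
     :  *
     :  * The generic layer then parks the I/O on nomem_io and retries it from
     :  * bdev_shared_ch_retry_io() or, for qd==1 cases, from bdev_no_mem_poller().
     :  */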
1720 :
1721 : static void
1722 26 : _bdev_io_complete_push_bounce_done(void *ctx, int rc)
1723 : {
1724 26 : struct spdk_bdev_io *bdev_io = ctx;
1725 26 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1726 :
1727 26 : if (rc) {
1728 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1729 0 : }
1730 : /* We want to free the bounce buffer here since we know we're done with it (as opposed
1731 : * to waiting for the conditional free of internal.buf.ptr in spdk_bdev_free_io()).
1732 : */
1733 26 : bdev_io_put_buf(bdev_io);
1734 :
1735 26 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1736 0 : bdev_ch_retry_io(ch);
1737 0 : }
1738 :
1739 : /* Continue with IO completion flow */
1740 26 : bdev_io_complete(bdev_io);
1741 26 : }
1742 :
1743 : static void
1744 2 : bdev_io_push_bounce_md_buf_done(void *ctx, int rc)
1745 : {
1746 2 : struct spdk_bdev_io *bdev_io = ctx;
1747 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1748 :
1749 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1750 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1751 2 : bdev_io->internal.f.has_bounce_buf = false;
1752 :
1753 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1754 0 : bdev_ch_retry_io(ch);
1755 0 : }
1756 :
1757 2 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1758 2 : }
1759 :
1760 : static inline void
1761 26 : bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
1762 : {
1763 26 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1764 26 : int rc = 0;
1765 :
1766 26 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1767 26 : assert(bdev_io->internal.f.has_bounce_buf);
1768 :
 1769 : /* Push the metadata buffer back the same way as the data buffer */
1770 26 : if (spdk_unlikely(bdev_io->internal.bounce_buf.orig_md_iov.iov_base != NULL)) {
1771 4 : assert(spdk_bdev_is_md_separate(bdev_io->bdev));
1772 :
1773 4 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1774 2 : if (bdev_io_use_memory_domain(bdev_io)) {
1775 2 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1776 2 : bdev_io_increment_outstanding(ch, ch->shared_resource);
 1777 : /* If a memory domain is used, we need to call the async push function */
1778 4 : rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1779 2 : bdev_io->internal.memory_domain_ctx,
1780 2 : &bdev_io->internal.bounce_buf.orig_md_iov,
1781 2 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1782 2 : &bdev_io->internal.bounce_buf.md_iov, 1,
1783 : bdev_io_push_bounce_md_buf_done,
1784 2 : bdev_io);
1785 2 : if (rc == 0) {
1786 : /* Continue IO completion in async callback */
1787 2 : return;
1788 : }
1789 0 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1790 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1791 0 : if (rc != -ENOMEM) {
1792 0 : SPDK_ERRLOG("Failed to push md to memory domain %s\n",
1793 : spdk_memory_domain_get_dma_device_id(
1794 : bdev_io->internal.memory_domain));
1795 0 : }
1796 0 : } else {
1797 0 : memcpy(bdev_io->internal.bounce_buf.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
1798 0 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
1799 : }
1800 0 : }
1801 2 : }
1802 :
1803 24 : if (spdk_unlikely(rc == -ENOMEM)) {
1804 0 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH_MD);
1805 0 : } else {
1806 24 : assert(bdev_io->internal.data_transfer_cpl);
1807 24 : bdev_io->internal.f.has_bounce_buf = false;
1808 24 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1809 : }
1810 26 : }
1811 :
1812 : static inline void
1813 26 : bdev_io_push_bounce_data_done(struct spdk_bdev_io *bdev_io, int rc)
1814 : {
1815 26 : assert(bdev_io->internal.data_transfer_cpl);
1816 26 : if (rc) {
1817 0 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1818 0 : return;
1819 : }
1820 :
1821 : /* set original buffer for this io */
1822 26 : bdev_io->u.bdev.iovcnt = bdev_io->internal.bounce_buf.orig_iovcnt;
1823 26 : bdev_io->u.bdev.iovs = bdev_io->internal.bounce_buf.orig_iovs;
1824 :
1825 : /* We don't set bdev_io->internal.f.has_bounce_buf to false here because
1826 : * we still need to clear the md buf */
1827 :
1828 26 : bdev_io_push_bounce_md_buf(bdev_io);
1829 26 : }
1830 :
1831 : static void
1832 2 : bdev_io_push_bounce_data_done_and_track(void *ctx, int status)
1833 : {
1834 2 : struct spdk_bdev_io *bdev_io = ctx;
1835 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1836 :
1837 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1838 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1839 :
1840 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1841 0 : bdev_ch_retry_io(ch);
1842 0 : }
1843 :
1844 2 : bdev_io_push_bounce_data_done(bdev_io, status);
1845 2 : }
1846 :
1847 : static inline void
1848 27 : bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
1849 : {
1850 27 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1851 27 : int rc = 0;
1852 :
1853 27 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1854 27 : assert(!bdev_io_use_accel_sequence(bdev_io));
1855 27 : assert(bdev_io->internal.f.has_bounce_buf);
1856 :
1857 : /* if this is read path, copy data from bounce buffer to original buffer */
1858 27 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1859 11 : if (bdev_io_use_memory_domain(bdev_io)) {
1860 3 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1861 3 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1862 : /* If memory domain is used then we need to call async push function */
1863 6 : rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1864 3 : bdev_io->internal.memory_domain_ctx,
1865 3 : bdev_io->internal.bounce_buf.orig_iovs,
1866 3 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1867 3 : &bdev_io->internal.bounce_buf.iov, 1,
1868 : bdev_io_push_bounce_data_done_and_track,
1869 3 : bdev_io);
1870 3 : if (rc == 0) {
1871 : /* Continue IO completion in async callback */
1872 2 : return;
1873 : }
1874 :
1875 1 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1876 1 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1877 1 : if (rc != -ENOMEM) {
1878 0 : SPDK_ERRLOG("Failed to push data to memory domain %s\n",
1879 : spdk_memory_domain_get_dma_device_id(
1880 : bdev_io->internal.memory_domain));
1881 0 : }
1882 1 : } else {
1883 16 : spdk_copy_buf_to_iovs(bdev_io->internal.bounce_buf.orig_iovs,
1884 8 : bdev_io->internal.bounce_buf.orig_iovcnt,
1885 8 : bdev_io->internal.bounce_buf.iov.iov_base,
1886 8 : bdev_io->internal.bounce_buf.iov.iov_len);
1887 : }
1888 9 : }
1889 :
1890 25 : if (spdk_unlikely(rc == -ENOMEM)) {
1891 1 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH);
1892 1 : } else {
1893 24 : bdev_io_push_bounce_data_done(bdev_io, rc);
1894 : }
1895 27 : }
1896 :
1897 : static inline void
1898 26 : _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_buffer_cpl cpl_cb)
1899 : {
1900 26 : bdev_io->internal.data_transfer_cpl = cpl_cb;
1901 26 : bdev_io_push_bounce_data(bdev_io);
1902 26 : }
1903 :
1904 : static void
1905 0 : bdev_io_get_iobuf_cb(struct spdk_iobuf_entry *iobuf, void *buf)
1906 : {
1907 : struct spdk_bdev_io *bdev_io;
1908 :
1909 0 : bdev_io = SPDK_CONTAINEROF(iobuf, struct spdk_bdev_io, internal.iobuf);
1910 0 : _bdev_io_set_buf(bdev_io, buf, bdev_io->internal.buf.len);
1911 0 : }
1912 :
1913 : static void
1914 42 : bdev_io_get_buf(struct spdk_bdev_io *bdev_io, uint64_t len)
1915 : {
1916 : struct spdk_bdev_mgmt_channel *mgmt_ch;
1917 : uint64_t max_len;
1918 : void *buf;
1919 :
1920 42 : assert(spdk_bdev_io_get_thread(bdev_io) == spdk_get_thread());
1921 42 : mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1922 42 : max_len = bdev_io_get_max_buf_len(bdev_io, len);
1923 :
1924 42 : if (spdk_unlikely(max_len > mgmt_ch->iobuf.cache[0].large.bufsize)) {
1925 0 : SPDK_ERRLOG("Length %" PRIu64 " is larger than allowed\n", max_len);
1926 0 : bdev_io_get_buf_complete(bdev_io, false);
1927 0 : return;
1928 : }
1929 :
1930 42 : bdev_io->internal.buf.len = len;
1931 42 : buf = spdk_iobuf_get(&mgmt_ch->iobuf, max_len, &bdev_io->internal.iobuf,
1932 : bdev_io_get_iobuf_cb);
1933 42 : if (buf != NULL) {
1934 42 : _bdev_io_set_buf(bdev_io, buf, len);
1935 42 : }
1936 42 : }
1937 :
1938 : void
1939 56 : spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1940 : {
1941 56 : struct spdk_bdev *bdev = bdev_io->bdev;
1942 : uint64_t alignment;
1943 :
1944 56 : assert(cb != NULL);
1945 56 : bdev_io->internal.get_buf_cb = cb;
1946 :
1947 56 : alignment = spdk_bdev_get_buf_align(bdev);
1948 :
1949 56 : if (_is_buf_allocated(bdev_io->u.bdev.iovs) &&
1950 40 : _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
1951 : /* Buffer already present and aligned */
1952 18 : cb(spdk_bdev_io_get_io_channel(bdev_io), bdev_io, true);
1953 18 : return;
1954 : }
1955 :
1956 38 : bdev_io_get_buf(bdev_io, len);
1957 56 : }
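     :
     : /* Illustrative sketch (not part of bdev.c): a bdev module typically calls
     :  * spdk_bdev_io_get_buf() at the start of its READ path and defers the actual
     :  * submission to the callback (names prefixed my_ are hypothetical):
     :  *
     :  *	static void
     :  *	my_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
     :  *		      bool success)
     :  *	{
     :  *		if (!success) {
     :  *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
     :  *			return;
     :  *		}
     :  *		my_submit_read(ch, bdev_io);	... iovs now point at an aligned buffer
     :  *	}
     :  *
     :  *	spdk_bdev_io_get_buf(bdev_io, my_get_buf_cb,
     :  *			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
     :  */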
1958 :
1959 : static void
1960 4 : _bdev_io_get_bounce_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
1961 : uint64_t len)
1962 : {
1963 4 : assert(cb != NULL);
1964 4 : bdev_io->internal.get_buf_cb = cb;
1965 :
1966 4 : bdev_io_get_buf(bdev_io, len);
1967 4 : }
1968 :
1969 : static void
1970 0 : _bdev_io_get_accel_buf(struct spdk_bdev_io *bdev_io)
1971 : {
1972 0 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1973 : void *buf;
1974 : int rc;
1975 :
1976 0 : rc = spdk_accel_get_buf(ch->accel_channel,
1977 0 : bdev_io->internal.buf.len,
1978 : &buf,
1979 0 : &bdev_io->u.bdev.memory_domain,
1980 0 : &bdev_io->u.bdev.memory_domain_ctx);
1981 0 : if (rc != 0) {
1982 0 : bdev_queue_nomem_io_tail(ch->shared_resource, bdev_io,
1983 : BDEV_IO_RETRY_STATE_GET_ACCEL_BUF);
1984 0 : return;
1985 : }
1986 :
1987 0 : _bdev_io_set_buf(bdev_io, buf, bdev_io->internal.buf.len);
1988 0 : }
1989 :
1990 : static inline void
1991 0 : bdev_io_get_accel_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
1992 : uint64_t len)
1993 : {
1994 0 : bdev_io->internal.buf.len = len;
1995 0 : bdev_io->internal.get_buf_cb = cb;
1996 :
1997 0 : _bdev_io_get_accel_buf(bdev_io);
1998 0 : }
1999 :
2000 3 : SPDK_LOG_DEPRECATION_REGISTER(spdk_bdev_io_get_aux_buf,
2001 : "spdk_bdev_io_get_aux_buf is deprecated", "v25.01", 0);
2002 :
2003 : void
2004 0 : spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
2005 : {
2006 0 : uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
2007 :
2008 0 : SPDK_LOG_DEPRECATED(spdk_bdev_io_get_aux_buf);
2009 :
2010 0 : assert(cb != NULL);
2011 0 : assert(bdev_io->internal.get_aux_buf_cb == NULL);
2012 0 : bdev_io->internal.get_aux_buf_cb = cb;
2013 0 : bdev_io_get_buf(bdev_io, len);
2014 0 : }
2015 :
2016 : static int
2017 68 : bdev_module_get_max_ctx_size(void)
2018 : {
2019 : struct spdk_bdev_module *bdev_module;
2020 68 : int max_bdev_module_size = 0;
2021 :
2022 266 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2023 198 : if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
2024 67 : max_bdev_module_size = bdev_module->get_ctx_size();
2025 67 : }
2026 198 : }
2027 :
2028 68 : return max_bdev_module_size;
2029 : }
2030 :
2031 : static void
2032 0 : bdev_enable_histogram_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
2033 : {
2034 0 : if (!bdev->internal.histogram_enabled) {
2035 0 : return;
2036 : }
2037 :
2038 0 : spdk_json_write_object_begin(w);
2039 0 : spdk_json_write_named_string(w, "method", "bdev_enable_histogram");
2040 :
2041 0 : spdk_json_write_named_object_begin(w, "params");
2042 0 : spdk_json_write_named_string(w, "name", bdev->name);
2043 :
2044 0 : spdk_json_write_named_bool(w, "enable", bdev->internal.histogram_enabled);
2045 :
2046 0 : if (bdev->internal.histogram_io_type) {
2047 0 : spdk_json_write_named_string(w, "opc",
2048 0 : spdk_bdev_get_io_type_name(bdev->internal.histogram_io_type));
2049 0 : }
2050 :
2051 0 : spdk_json_write_object_end(w);
2052 :
2053 0 : spdk_json_write_object_end(w);
2054 0 : }
2055 :
2056 : static void
2057 0 : bdev_qos_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
2058 : {
2059 : int i;
2060 0 : struct spdk_bdev_qos *qos = bdev->internal.qos;
2061 : uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
2062 :
2063 0 : if (!qos) {
2064 0 : return;
2065 : }
2066 :
2067 0 : spdk_bdev_get_qos_rate_limits(bdev, limits);
2068 :
2069 0 : spdk_json_write_object_begin(w);
2070 0 : spdk_json_write_named_string(w, "method", "bdev_set_qos_limit");
2071 :
2072 0 : spdk_json_write_named_object_begin(w, "params");
2073 0 : spdk_json_write_named_string(w, "name", bdev->name);
2074 0 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2075 0 : if (limits[i] > 0) {
2076 0 : spdk_json_write_named_uint64(w, qos_rpc_type[i], limits[i]);
2077 0 : }
2078 0 : }
2079 0 : spdk_json_write_object_end(w);
2080 :
2081 0 : spdk_json_write_object_end(w);
2082 0 : }
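     :
     : /* Illustrative output of the object written above (values are made up):
     :  *
     :  *	{
     :  *	  "method": "bdev_set_qos_limit",
     :  *	  "params": {
     :  *	    "name": "Malloc0",
     :  *	    "rw_ios_per_sec": 20000,
     :  *	    "r_mbytes_per_sec": 100
     :  *	  }
     :  *	}
     :  */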
2083 :
2084 : void
2085 0 : spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
2086 : {
2087 : struct spdk_bdev_module *bdev_module;
2088 : struct spdk_bdev *bdev;
2089 :
2090 0 : assert(w != NULL);
2091 :
2092 0 : spdk_json_write_array_begin(w);
2093 :
2094 0 : spdk_json_write_object_begin(w);
2095 0 : spdk_json_write_named_string(w, "method", "bdev_set_options");
2096 0 : spdk_json_write_named_object_begin(w, "params");
2097 0 : spdk_json_write_named_uint32(w, "bdev_io_pool_size", g_bdev_opts.bdev_io_pool_size);
2098 0 : spdk_json_write_named_uint32(w, "bdev_io_cache_size", g_bdev_opts.bdev_io_cache_size);
2099 0 : spdk_json_write_named_bool(w, "bdev_auto_examine", g_bdev_opts.bdev_auto_examine);
2100 0 : spdk_json_write_named_uint32(w, "iobuf_small_cache_size", g_bdev_opts.iobuf_small_cache_size);
2101 0 : spdk_json_write_named_uint32(w, "iobuf_large_cache_size", g_bdev_opts.iobuf_large_cache_size);
2102 0 : spdk_json_write_object_end(w);
2103 0 : spdk_json_write_object_end(w);
2104 :
2105 0 : bdev_examine_allowlist_config_json(w);
2106 :
2107 0 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2108 0 : if (bdev_module->config_json) {
2109 0 : bdev_module->config_json(w);
2110 0 : }
2111 0 : }
2112 :
2113 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
2114 :
2115 0 : TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
2116 0 : if (bdev->fn_table->write_config_json) {
2117 0 : bdev->fn_table->write_config_json(bdev, w);
2118 0 : }
2119 :
2120 0 : bdev_qos_config_json(bdev, w);
2121 0 : bdev_enable_histogram_config_json(bdev, w);
2122 0 : }
2123 :
2124 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
2125 :
 2126 : /* This has to be the last RPC in the array to make sure all bdevs have finished examine */
2127 0 : spdk_json_write_object_begin(w);
2128 0 : spdk_json_write_named_string(w, "method", "bdev_wait_for_examine");
2129 0 : spdk_json_write_object_end(w);
2130 :
2131 0 : spdk_json_write_array_end(w);
2132 0 : }
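     :
     : /* Illustrative shape of the array produced above (abridged, values made up):
     :  *
     :  *	[
     :  *	  { "method": "bdev_set_options", "params": { "bdev_io_pool_size": 65535, ... } },
     :  *	  ... per-module and per-bdev configuration objects ...
     :  *	  { "method": "bdev_wait_for_examine" }
     :  *	]
     :  *
     :  * bdev_wait_for_examine must come last so that replaying the config blocks
     :  * until all bdevs created by the earlier RPCs have been examined.
     :  */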
2133 :
2134 : static void
2135 72 : bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
2136 : {
2137 72 : struct spdk_bdev_mgmt_channel *ch = ctx_buf;
2138 : struct spdk_bdev_io *bdev_io;
2139 :
2140 72 : spdk_iobuf_channel_fini(&ch->iobuf);
2141 :
2142 10226 : while (!STAILQ_EMPTY(&ch->per_thread_cache)) {
2143 10154 : bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
2144 10154 : STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
2145 10154 : ch->per_thread_cache_count--;
2146 10154 : spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
2147 : }
2148 :
2149 72 : assert(ch->per_thread_cache_count == 0);
2150 72 : }
2151 :
2152 : static int
2153 72 : bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
2154 : {
2155 72 : struct spdk_bdev_mgmt_channel *ch = ctx_buf;
2156 : struct spdk_bdev_io *bdev_io;
2157 : uint32_t i;
2158 : int rc;
2159 :
2160 144 : rc = spdk_iobuf_channel_init(&ch->iobuf, "bdev",
2161 72 : g_bdev_opts.iobuf_small_cache_size,
2162 72 : g_bdev_opts.iobuf_large_cache_size);
2163 72 : if (rc != 0) {
2164 0 : SPDK_ERRLOG("Failed to create iobuf channel: %s\n", spdk_strerror(-rc));
2165 0 : return -1;
2166 : }
2167 :
2168 72 : STAILQ_INIT(&ch->per_thread_cache);
2169 72 : ch->bdev_io_cache_size = g_bdev_opts.bdev_io_cache_size;
2170 :
2171 : /* Pre-populate bdev_io cache to ensure this thread cannot be starved. */
2172 72 : ch->per_thread_cache_count = 0;
2173 10226 : for (i = 0; i < ch->bdev_io_cache_size; i++) {
2174 10154 : bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2175 10154 : if (bdev_io == NULL) {
2176 0 : SPDK_ERRLOG("You need to increase bdev_io_pool_size using bdev_set_options RPC.\n");
2177 0 : assert(false);
2178 : bdev_mgmt_channel_destroy(io_device, ctx_buf);
2179 : return -1;
2180 : }
2181 10154 : ch->per_thread_cache_count++;
2182 10154 : STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2183 10154 : }
2184 :
2185 72 : TAILQ_INIT(&ch->shared_resources);
2186 72 : TAILQ_INIT(&ch->io_wait_queue);
2187 :
2188 72 : return 0;
2189 72 : }
2190 :
2191 : static void
2192 68 : bdev_init_complete(int rc)
2193 : {
2194 68 : spdk_bdev_init_cb cb_fn = g_init_cb_fn;
2195 68 : void *cb_arg = g_init_cb_arg;
2196 : struct spdk_bdev_module *m;
2197 :
2198 68 : g_bdev_mgr.init_complete = true;
2199 68 : g_init_cb_fn = NULL;
2200 68 : g_init_cb_arg = NULL;
2201 :
2202 : /*
2203 : * For modules that need to know when subsystem init is complete,
2204 : * inform them now.
2205 : */
2206 68 : if (rc == 0) {
2207 266 : TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2208 198 : if (m->init_complete) {
2209 24 : m->init_complete();
2210 24 : }
2211 198 : }
2212 68 : }
2213 :
2214 68 : cb_fn(cb_arg, rc);
2215 68 : }
2216 :
2217 : static bool
2218 271 : bdev_module_all_actions_completed(void)
2219 : {
2220 : struct spdk_bdev_module *m;
2221 :
2222 1078 : TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2223 807 : if (m->internal.action_in_progress > 0) {
2224 0 : return false;
2225 : }
2226 807 : }
2227 271 : return true;
2228 271 : }
2229 :
2230 : static void
2231 629 : bdev_module_action_complete(void)
2232 : {
2233 : /*
2234 : * Don't finish bdev subsystem initialization if
2235 : * module pre-initialization is still in progress, or
2236 : * the subsystem been already initialized.
2237 : */
2238 629 : if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
2239 561 : return;
2240 : }
2241 :
2242 : /*
2243 : * Check all bdev modules for inits/examinations in progress. If any
2244 : * exist, return immediately since we cannot finish bdev subsystem
2245 : * initialization until all are completed.
2246 : */
2247 68 : if (!bdev_module_all_actions_completed()) {
2248 0 : return;
2249 : }
2250 :
2251 : /*
2252 : * Modules already finished initialization - now that all
2253 : * the bdev modules have finished their asynchronous I/O
2254 : * processing, the entire bdev layer can be marked as complete.
2255 : */
2256 68 : bdev_init_complete(0);
2257 629 : }
2258 :
2259 : static void
2260 561 : bdev_module_action_done(struct spdk_bdev_module *module)
2261 : {
2262 561 : spdk_spin_lock(&module->internal.spinlock);
2263 561 : assert(module->internal.action_in_progress > 0);
2264 561 : module->internal.action_in_progress--;
2265 561 : spdk_spin_unlock(&module->internal.spinlock);
2266 561 : bdev_module_action_complete();
2267 561 : }
2268 :
2269 : void
2270 68 : spdk_bdev_module_init_done(struct spdk_bdev_module *module)
2271 : {
2272 68 : assert(module->async_init);
2273 68 : bdev_module_action_done(module);
2274 68 : }
2275 :
2276 : void
2277 493 : spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
2278 : {
2279 493 : bdev_module_action_done(module);
2280 493 : }
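     :
     : /* Illustrative sketch (not part of bdev.c): every examine callback in a bdev
     :  * module must eventually be balanced by a call to
     :  * spdk_bdev_module_examine_done(), even when the module is not interested in
     :  * the bdev. A hypothetical synchronous examine might be:
     :  *
     :  *	static void
     :  *	my_examine(struct spdk_bdev *bdev)
     :  *	{
     :  *		... optionally open and claim the bdev ...
     :  *		spdk_bdev_module_examine_done(&my_module);
     :  *	}
     :  */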
2281 :
2282 : /** The last initialized bdev module */
2283 : static struct spdk_bdev_module *g_resume_bdev_module = NULL;
2284 :
2285 : static void
2286 0 : bdev_init_failed(void *cb_arg)
2287 : {
2288 0 : struct spdk_bdev_module *module = cb_arg;
2289 :
2290 0 : spdk_spin_lock(&module->internal.spinlock);
2291 0 : assert(module->internal.action_in_progress > 0);
2292 0 : module->internal.action_in_progress--;
2293 0 : spdk_spin_unlock(&module->internal.spinlock);
2294 0 : bdev_init_complete(-1);
2295 0 : }
2296 :
2297 : static int
2298 68 : bdev_modules_init(void)
2299 : {
2300 : struct spdk_bdev_module *module;
2301 68 : int rc = 0;
2302 :
2303 266 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2304 198 : g_resume_bdev_module = module;
2305 198 : if (module->async_init) {
2306 68 : spdk_spin_lock(&module->internal.spinlock);
2307 68 : module->internal.action_in_progress = 1;
2308 68 : spdk_spin_unlock(&module->internal.spinlock);
2309 68 : }
2310 198 : rc = module->module_init();
2311 198 : if (rc != 0) {
 2312 : /* Bump action_in_progress to prevent other modules from completing modules_init.
 2313 : * Send a message to defer application shutdown until resources are cleaned up. */
2314 0 : spdk_spin_lock(&module->internal.spinlock);
2315 0 : module->internal.action_in_progress = 1;
2316 0 : spdk_spin_unlock(&module->internal.spinlock);
2317 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_init_failed, module);
2318 0 : return rc;
2319 : }
2320 198 : }
2321 :
2322 68 : g_resume_bdev_module = NULL;
2323 68 : return 0;
2324 68 : }
2325 :
2326 : void
2327 68 : spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
2328 : {
2329 68 : int rc = 0;
2330 : char mempool_name[32];
2331 :
2332 68 : assert(cb_fn != NULL);
2333 :
2334 68 : g_init_cb_fn = cb_fn;
2335 68 : g_init_cb_arg = cb_arg;
2336 :
2337 68 : spdk_notify_type_register("bdev_register");
2338 68 : spdk_notify_type_register("bdev_unregister");
2339 :
2340 68 : snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
2341 :
2342 68 : rc = spdk_iobuf_register_module("bdev");
2343 68 : if (rc != 0) {
2344 0 : SPDK_ERRLOG("could not register bdev iobuf module: %s\n", spdk_strerror(-rc));
2345 0 : bdev_init_complete(-1);
2346 0 : return;
2347 : }
2348 :
2349 136 : g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
2350 68 : g_bdev_opts.bdev_io_pool_size,
2351 68 : sizeof(struct spdk_bdev_io) +
2352 68 : bdev_module_get_max_ctx_size(),
2353 : 0,
2354 : SPDK_ENV_NUMA_ID_ANY);
2355 :
2356 68 : if (g_bdev_mgr.bdev_io_pool == NULL) {
2357 0 : SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
2358 0 : bdev_init_complete(-1);
2359 0 : return;
2360 : }
2361 :
2362 68 : g_bdev_mgr.zero_buffer = spdk_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
2363 : NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
2364 68 : if (!g_bdev_mgr.zero_buffer) {
2365 0 : SPDK_ERRLOG("create bdev zero buffer failed\n");
2366 0 : bdev_init_complete(-1);
2367 0 : return;
2368 : }
2369 :
2370 : #ifdef SPDK_CONFIG_VTUNE
2371 : g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
2372 : #endif
2373 :
2374 68 : spdk_io_device_register(&g_bdev_mgr, bdev_mgmt_channel_create,
2375 : bdev_mgmt_channel_destroy,
2376 : sizeof(struct spdk_bdev_mgmt_channel),
2377 : "bdev_mgr");
2378 :
2379 68 : rc = bdev_modules_init();
2380 68 : g_bdev_mgr.module_init_complete = true;
2381 68 : if (rc != 0) {
2382 0 : SPDK_ERRLOG("bdev modules init failed\n");
2383 0 : return;
2384 : }
2385 :
2386 68 : bdev_module_action_complete();
2387 68 : }
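     :
     : /* Illustrative sketch (not part of bdev.c): spdk_bdev_initialize() is
     :  * normally invoked by the event framework's bdev subsystem, but a caller
     :  * driving it directly would look roughly like:
     :  *
     :  *	static void
     :  *	bdev_init_done(void *cb_arg, int rc)
     :  *	{
     :  *		if (rc != 0) {
     :  *			SPDK_ERRLOG("bdev subsystem init failed: %d\n", rc);
     :  *		}
     :  *	}
     :  *
     :  *	spdk_bdev_initialize(bdev_init_done, NULL);
     :  */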
2388 :
2389 : static void
2390 68 : bdev_mgr_unregister_cb(void *io_device)
2391 : {
2392 68 : spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;
2393 :
2394 68 : if (g_bdev_mgr.bdev_io_pool) {
2395 68 : if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != g_bdev_opts.bdev_io_pool_size) {
2396 0 : SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
2397 : spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
2398 : g_bdev_opts.bdev_io_pool_size);
2399 0 : }
2400 :
2401 68 : spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
2402 68 : }
2403 :
2404 68 : spdk_free(g_bdev_mgr.zero_buffer);
2405 :
2406 68 : bdev_examine_allowlist_free();
2407 :
2408 68 : cb_fn(g_fini_cb_arg);
2409 68 : g_fini_cb_fn = NULL;
2410 68 : g_fini_cb_arg = NULL;
2411 68 : g_bdev_mgr.init_complete = false;
2412 68 : g_bdev_mgr.module_init_complete = false;
2413 68 : }
2414 :
2415 : static void
2416 68 : bdev_module_fini_iter(void *arg)
2417 : {
2418 : struct spdk_bdev_module *bdev_module;
2419 :
2420 : /* FIXME: Handling initialization failures is broken now,
2421 : * so we won't even try cleaning up after successfully
 2422 : * initialized modules. If module_init_complete is false,
 2423 : * just call bdev_mgr_unregister_cb().
2424 : */
2425 68 : if (!g_bdev_mgr.module_init_complete) {
2426 0 : bdev_mgr_unregister_cb(NULL);
2427 0 : return;
2428 : }
2429 :
2430 : /* Start iterating from the last touched module */
2431 68 : if (!g_resume_bdev_module) {
2432 68 : bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2433 68 : } else {
2434 0 : bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list,
2435 : internal.tailq);
2436 : }
2437 :
2438 266 : while (bdev_module) {
2439 198 : if (bdev_module->async_fini) {
2440 : /* Save our place so we can resume later. We must
2441 : * save the variable here, before calling module_fini()
2442 : * below, because in some cases the module may immediately
2443 : * call spdk_bdev_module_fini_done() and re-enter
2444 : * this function to continue iterating. */
2445 0 : g_resume_bdev_module = bdev_module;
2446 0 : }
2447 :
2448 198 : if (bdev_module->module_fini) {
2449 198 : bdev_module->module_fini();
2450 198 : }
2451 :
2452 198 : if (bdev_module->async_fini) {
2453 0 : return;
2454 : }
2455 :
2456 198 : bdev_module = TAILQ_PREV(bdev_module, bdev_module_list,
2457 : internal.tailq);
2458 : }
2459 :
2460 68 : g_resume_bdev_module = NULL;
2461 68 : spdk_io_device_unregister(&g_bdev_mgr, bdev_mgr_unregister_cb);
2462 68 : }
2463 :
2464 : void
2465 0 : spdk_bdev_module_fini_done(void)
2466 : {
2467 0 : if (spdk_get_thread() != g_fini_thread) {
2468 0 : spdk_thread_send_msg(g_fini_thread, bdev_module_fini_iter, NULL);
2469 0 : } else {
2470 0 : bdev_module_fini_iter(NULL);
2471 : }
2472 0 : }
2473 :
2474 : static void
2475 68 : bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
2476 : {
2477 68 : struct spdk_bdev *bdev = cb_arg;
2478 :
2479 68 : if (bdeverrno && bdev) {
2480 0 : SPDK_WARNLOG("Unable to unregister bdev '%s' during spdk_bdev_finish()\n",
2481 : bdev->name);
2482 :
2483 : /*
2484 : * Since the call to spdk_bdev_unregister() failed, we have no way to free this
2485 : * bdev; try to continue by manually removing this bdev from the list and continue
2486 : * with the next bdev in the list.
2487 : */
2488 0 : TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
2489 0 : }
2490 :
2491 68 : if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
2492 68 : SPDK_DEBUGLOG(bdev, "Done unregistering bdevs\n");
2493 : /*
 2494 : * Bdev module finish needs to be deferred, as we might be in the middle of some context
2495 : * (like bdev part free) that will use this bdev (or private bdev driver ctx data)
2496 : * after returning.
2497 : */
2498 68 : spdk_thread_send_msg(spdk_get_thread(), bdev_module_fini_iter, NULL);
2499 68 : return;
2500 : }
2501 :
2502 : /*
 2503 : * Unregister the last unclaimed bdev in the list, to ensure that bdev subsystem
2504 : * shutdown proceeds top-down. The goal is to give virtual bdevs an opportunity
2505 : * to detect clean shutdown as opposed to run-time hot removal of the underlying
2506 : * base bdevs.
2507 : *
 2508 : * Also, walk the list in reverse order.
2509 : */
2510 0 : for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2511 0 : bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2512 0 : spdk_spin_lock(&bdev->internal.spinlock);
2513 0 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
2514 0 : LOG_ALREADY_CLAIMED_DEBUG("claimed, skipping", bdev);
2515 0 : spdk_spin_unlock(&bdev->internal.spinlock);
2516 0 : continue;
2517 : }
2518 0 : spdk_spin_unlock(&bdev->internal.spinlock);
2519 :
2520 0 : SPDK_DEBUGLOG(bdev, "Unregistering bdev '%s'\n", bdev->name);
2521 0 : spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2522 0 : return;
2523 : }
2524 :
2525 : /*
 2526 : * If any bdev fails to release its claim on the underlying bdev properly, we
 2527 : * may be left with a list consisting of claimed bdevs only (if claims were
 2528 : * managed correctly, this would imply a loop in the claims graph, which is
 2529 : * clearly impossible). In that case, warn and unregister the last bdev on the list.
2530 : */
2531 0 : for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2532 0 : bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2533 0 : SPDK_WARNLOG("Unregistering claimed bdev '%s'!\n", bdev->name);
2534 0 : spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2535 0 : return;
2536 : }
2537 68 : }
2538 :
2539 : static void
2540 68 : bdev_module_fini_start_iter(void *arg)
2541 : {
2542 : struct spdk_bdev_module *bdev_module;
2543 :
2544 68 : if (!g_resume_bdev_module) {
2545 68 : bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2546 68 : } else {
2547 0 : bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list, internal.tailq);
2548 : }
2549 :
2550 266 : while (bdev_module) {
2551 198 : if (bdev_module->async_fini_start) {
2552 : /* Save our place so we can resume later. We must
2553 : * save the variable here, before calling fini_start()
2554 : * below, because in some cases the module may immediately
2555 : * call spdk_bdev_module_fini_start_done() and re-enter
2556 : * this function to continue iterating. */
2557 0 : g_resume_bdev_module = bdev_module;
2558 0 : }
2559 :
2560 198 : if (bdev_module->fini_start) {
2561 24 : bdev_module->fini_start();
2562 24 : }
2563 :
2564 198 : if (bdev_module->async_fini_start) {
2565 0 : return;
2566 : }
2567 :
2568 198 : bdev_module = TAILQ_PREV(bdev_module, bdev_module_list, internal.tailq);
2569 : }
2570 :
2571 68 : g_resume_bdev_module = NULL;
2572 :
2573 68 : bdev_finish_unregister_bdevs_iter(NULL, 0);
2574 68 : }
2575 :
2576 : void
2577 0 : spdk_bdev_module_fini_start_done(void)
2578 : {
2579 0 : if (spdk_get_thread() != g_fini_thread) {
2580 0 : spdk_thread_send_msg(g_fini_thread, bdev_module_fini_start_iter, NULL);
2581 0 : } else {
2582 0 : bdev_module_fini_start_iter(NULL);
2583 : }
2584 0 : }
2585 :
2586 : static void
2587 68 : bdev_finish_wait_for_examine_done(void *cb_arg)
2588 : {
2589 68 : bdev_module_fini_start_iter(NULL);
2590 68 : }
2591 :
2592 : static void bdev_open_async_fini(void);
2593 :
2594 : void
2595 68 : spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
2596 : {
2597 : int rc;
2598 :
2599 68 : assert(cb_fn != NULL);
2600 :
2601 68 : g_fini_thread = spdk_get_thread();
2602 :
2603 68 : g_fini_cb_fn = cb_fn;
2604 68 : g_fini_cb_arg = cb_arg;
2605 :
2606 68 : bdev_open_async_fini();
2607 :
2608 68 : rc = spdk_bdev_wait_for_examine(bdev_finish_wait_for_examine_done, NULL);
2609 68 : if (rc != 0) {
2610 0 : SPDK_ERRLOG("wait_for_examine failed: %s\n", spdk_strerror(-rc));
2611 0 : bdev_finish_wait_for_examine_done(NULL);
2612 0 : }
2613 68 : }
2614 :
2615 : struct spdk_bdev_io *
2616 699 : bdev_channel_get_io(struct spdk_bdev_channel *channel)
2617 : {
2618 699 : struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
2619 : struct spdk_bdev_io *bdev_io;
2620 :
2621 699 : if (ch->per_thread_cache_count > 0) {
2622 639 : bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
2623 639 : STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
2624 639 : ch->per_thread_cache_count--;
2625 699 : } else if (spdk_unlikely(!TAILQ_EMPTY(&ch->io_wait_queue))) {
2626 : /*
2627 : * Don't try to look for bdev_ios in the global pool if there are
2628 : * waiters on bdev_ios - we don't want this caller to jump the line.
2629 : */
2630 0 : bdev_io = NULL;
2631 0 : } else {
2632 60 : bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2633 : }
2634 :
2635 699 : return bdev_io;
2636 : }
2637 :
2638 : void
2639 693 : spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
2640 : {
2641 : struct spdk_bdev_mgmt_channel *ch;
2642 :
2643 693 : assert(bdev_io != NULL);
2644 693 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING);
2645 :
2646 693 : ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
2647 :
2648 693 : if (bdev_io->internal.f.has_buf) {
2649 16 : bdev_io_put_buf(bdev_io);
2650 16 : }
2651 :
2652 693 : if (ch->per_thread_cache_count < ch->bdev_io_cache_size) {
2653 639 : ch->per_thread_cache_count++;
2654 639 : STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2655 643 : while (ch->per_thread_cache_count > 0 && !TAILQ_EMPTY(&ch->io_wait_queue)) {
2656 : struct spdk_bdev_io_wait_entry *entry;
2657 :
2658 4 : entry = TAILQ_FIRST(&ch->io_wait_queue);
2659 4 : TAILQ_REMOVE(&ch->io_wait_queue, entry, link);
2660 4 : entry->cb_fn(entry->cb_arg);
2661 : }
2662 639 : } else {
2663 : /* We should never have a full cache with entries on the io wait queue. */
2664 54 : assert(TAILQ_EMPTY(&ch->io_wait_queue));
2665 54 : spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
2666 : }
2667 693 : }
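     :
     : /* Illustrative sketch (not part of bdev.c): the io_wait_queue drained above
     :  * is how callers recover from bdev_io exhaustion. When a submission helper
     :  * returns -ENOMEM, the caller registers a wait entry (usually embedded in
     :  * its own context struct) and retries from the callback:
     :  *
     :  *	rc = spdk_bdev_read_blocks(desc, io_ch, buf, offset, num_blocks,
     :  *				   my_read_done, ctx);
     :  *	if (rc == -ENOMEM) {
     :  *		ctx->wait_entry.bdev = bdev;
     :  *		ctx->wait_entry.cb_fn = my_retry_read;
     :  *		ctx->wait_entry.cb_arg = ctx;
     :  *		spdk_bdev_queue_io_wait(bdev, io_ch, &ctx->wait_entry);
     :  *	}
     :  *
     :  * my_read_done, my_retry_read and ctx are hypothetical caller-side names.
     :  */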
2668 :
2669 : static bool
2670 72 : bdev_qos_is_iops_rate_limit(enum spdk_bdev_qos_rate_limit_type limit)
2671 : {
2672 72 : assert(limit != SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
2673 :
2674 72 : switch (limit) {
2675 : case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2676 18 : return true;
2677 : case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2678 : case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2679 : case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2680 54 : return false;
2681 0 : case SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES:
2682 : default:
2683 0 : return false;
2684 : }
2685 72 : }
2686 :
2687 : static bool
2688 25 : bdev_qos_io_to_limit(struct spdk_bdev_io *bdev_io)
2689 : {
2690 25 : switch (bdev_io->type) {
2691 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2692 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2693 : case SPDK_BDEV_IO_TYPE_READ:
2694 : case SPDK_BDEV_IO_TYPE_WRITE:
2695 23 : return true;
2696 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2697 0 : if (bdev_io->u.bdev.zcopy.start) {
2698 0 : return true;
2699 : } else {
2700 0 : return false;
2701 : }
2702 : default:
2703 2 : return false;
2704 : }
2705 25 : }
2706 :
2707 : static bool
2708 33 : bdev_is_read_io(struct spdk_bdev_io *bdev_io)
2709 : {
2710 33 : switch (bdev_io->type) {
2711 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2712 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2713 : /* Bit 1 (0x2) set for read operation */
2714 0 : if (bdev_io->u.nvme_passthru.cmd.opc & SPDK_NVME_OPC_READ) {
2715 0 : return true;
2716 : } else {
2717 0 : return false;
2718 : }
2719 : case SPDK_BDEV_IO_TYPE_READ:
2720 30 : return true;
2721 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2722 : /* Populate to read from disk */
2723 0 : if (bdev_io->u.bdev.zcopy.populate) {
2724 0 : return true;
2725 : } else {
2726 0 : return false;
2727 : }
2728 : default:
2729 3 : return false;
2730 : }
2731 33 : }
2732 :
2733 : static uint64_t
2734 43 : bdev_get_io_size_in_byte(struct spdk_bdev_io *bdev_io)
2735 : {
2736 43 : uint32_t blocklen = bdev_io_get_block_size(bdev_io);
2737 :
2738 43 : switch (bdev_io->type) {
2739 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2740 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2741 0 : return bdev_io->u.nvme_passthru.nbytes;
2742 : case SPDK_BDEV_IO_TYPE_READ:
2743 : case SPDK_BDEV_IO_TYPE_WRITE:
2744 43 : return bdev_io->u.bdev.num_blocks * blocklen;
2745 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2746 : /* Track the data in the start phase only */
2747 0 : if (bdev_io->u.bdev.zcopy.start) {
2748 0 : return bdev_io->u.bdev.num_blocks * blocklen;
2749 : } else {
2750 0 : return 0;
2751 : }
2752 : default:
2753 0 : return 0;
2754 : }
2755 43 : }
2756 :
2757 : static inline bool
2758 64 : bdev_qos_rw_queue_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2759 : {
2760 : int64_t remaining_this_timeslice;
2761 :
2762 64 : if (!limit->max_per_timeslice) {
2763 : /* The QoS is disabled */
2764 0 : return false;
2765 : }
2766 :
2767 64 : remaining_this_timeslice = __atomic_sub_fetch(&limit->remaining_this_timeslice, delta,
2768 : __ATOMIC_RELAXED);
2769 64 : if (remaining_this_timeslice + (int64_t)delta > 0) {
2770 : /* There was still a quota for this delta -> the IO shouldn't be queued
2771 : *
 2772 : * We allow a slight quota overrun here so that an IO bigger than the per-timeslice
 2773 : * quota can be allowed once in a while. Such an overrun is then taken into account in
 2774 : * the QoS poller, where the next timeslice quota is calculated.
2775 : */
2776 59 : return false;
2777 : }
2778 :
2779 : /* There was no quota for this delta -> the IO should be queued
 2780 : * The remaining_this_timeslice must be rewound so it reflects the real
 2781 : * number of IOs or bytes allowed.
2782 : */
2783 5 : __atomic_add_fetch(
2784 5 : &limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2785 5 : return true;
2786 64 : }
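     :
     : /* Worked example (illustrative): assume a bytes-per-timeslice quota with
     :  * remaining_this_timeslice == 1024 and a 4096-byte IO (delta == 4096).
     :  * __atomic_sub_fetch() yields -3072; since -3072 + 4096 > 0 there was still
     :  * some quota left, so the IO proceeds with a slight overrun. A second
     :  * 4096-byte IO yields -7168; -7168 + 4096 <= 0, so the counter is rewound
     :  * back to -3072 and that IO is queued until the QoS poller grants the next
     :  * timeslice. */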
2787 :
2788 : static inline void
2789 5 : bdev_qos_rw_rewind_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2790 : {
2791 5 : __atomic_add_fetch(&limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2792 5 : }
2793 :
2794 : static bool
2795 23 : bdev_qos_rw_iops_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2796 : {
2797 23 : return bdev_qos_rw_queue_io(limit, io, 1);
2798 : }
2799 :
2800 : static void
2801 3 : bdev_qos_rw_iops_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2802 : {
2803 3 : bdev_qos_rw_rewind_io(limit, io, 1);
2804 3 : }
2805 :
2806 : static bool
2807 41 : bdev_qos_rw_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2808 : {
2809 41 : return bdev_qos_rw_queue_io(limit, io, bdev_get_io_size_in_byte(io));
2810 : }
2811 :
2812 : static void
2813 2 : bdev_qos_rw_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2814 : {
2815 2 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2816 2 : }
2817 :
2818 : static bool
2819 19 : bdev_qos_r_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2820 : {
2821 19 : if (bdev_is_read_io(io) == false) {
2822 1 : return false;
2823 : }
2824 :
2825 18 : return bdev_qos_rw_bps_queue(limit, io);
2826 19 : }
2827 :
2828 : static void
2829 0 : bdev_qos_r_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2830 : {
2831 0 : if (bdev_is_read_io(io) != false) {
2832 0 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2833 0 : }
2834 0 : }
2835 :
2836 : static bool
2837 14 : bdev_qos_w_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2838 : {
2839 14 : if (bdev_is_read_io(io) == true) {
2840 12 : return false;
2841 : }
2842 :
2843 2 : return bdev_qos_rw_bps_queue(limit, io);
2844 14 : }
2845 :
2846 : static void
2847 0 : bdev_qos_w_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2848 : {
2849 0 : if (bdev_is_read_io(io) != true) {
2850 0 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2851 0 : }
2852 0 : }
2853 :
2854 : static void
2855 10 : bdev_qos_set_ops(struct spdk_bdev_qos *qos)
2856 : {
2857 : int i;
2858 :
2859 50 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2860 40 : if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
2861 15 : qos->rate_limits[i].queue_io = NULL;
2862 15 : continue;
2863 : }
2864 :
2865 25 : switch (i) {
2866 : case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2867 9 : qos->rate_limits[i].queue_io = bdev_qos_rw_iops_queue;
2868 9 : qos->rate_limits[i].rewind_quota = bdev_qos_rw_iops_rewind_quota;
2869 9 : break;
2870 : case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2871 7 : qos->rate_limits[i].queue_io = bdev_qos_rw_bps_queue;
2872 7 : qos->rate_limits[i].rewind_quota = bdev_qos_rw_bps_rewind_quota;
2873 7 : break;
2874 : case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2875 5 : qos->rate_limits[i].queue_io = bdev_qos_r_bps_queue;
2876 5 : qos->rate_limits[i].rewind_quota = bdev_qos_r_bps_rewind_quota;
2877 5 : break;
2878 : case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2879 4 : qos->rate_limits[i].queue_io = bdev_qos_w_bps_queue;
2880 4 : qos->rate_limits[i].rewind_quota = bdev_qos_w_bps_rewind_quota;
2881 4 : break;
2882 : default:
2883 0 : break;
2884 : }
2885 25 : }
2886 10 : }
2887 :
2888 : static void
2889 6 : _bdev_io_complete_in_submit(struct spdk_bdev_channel *bdev_ch,
2890 : struct spdk_bdev_io *bdev_io,
2891 : enum spdk_bdev_io_status status)
2892 : {
2893 6 : bdev_io->internal.f.in_submit_request = true;
2894 6 : bdev_io_increment_outstanding(bdev_ch, bdev_ch->shared_resource);
2895 6 : spdk_bdev_io_complete(bdev_io, status);
2896 6 : bdev_io->internal.f.in_submit_request = false;
2897 6 : }
2898 :
2899 : static inline void
2900 574 : bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_io)
2901 : {
2902 574 : struct spdk_bdev *bdev = bdev_io->bdev;
2903 574 : struct spdk_io_channel *ch = bdev_ch->channel;
2904 574 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
2905 :
2906 574 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
2907 16 : struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
2908 16 : struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
2909 :
2910 16 : if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
2911 16 : bdev_abort_buf_io(mgmt_channel, bio_to_abort)) {
2912 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io,
2913 : SPDK_BDEV_IO_STATUS_SUCCESS);
2914 0 : return;
2915 : }
2916 16 : }
2917 :
2918 574 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE &&
2919 : bdev_io->bdev->split_on_write_unit &&
2920 : bdev_io->u.bdev.num_blocks < bdev_io->bdev->write_unit_size)) {
2921 4 : SPDK_ERRLOG("IO num_blocks %lu does not match the write_unit_size %u\n",
2922 : bdev_io->u.bdev.num_blocks, bdev_io->bdev->write_unit_size);
2923 4 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2924 4 : return;
2925 : }
2926 :
2927 570 : if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
2928 527 : bdev_io_increment_outstanding(bdev_ch, shared_resource);
2929 527 : bdev_io->internal.f.in_submit_request = true;
2930 527 : bdev_submit_request(bdev, ch, bdev_io);
2931 527 : bdev_io->internal.f.in_submit_request = false;
2932 527 : } else {
2933 43 : bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_SUBMIT);
2934 43 : if (shared_resource->nomem_threshold == 0 && shared_resource->io_outstanding == 0) {
 2935 : /* Special case: there are nomem IOs but no outstanding IOs whose
 2936 : * completions could trigger a retry of the queued IOs */
2937 0 : bdev_shared_ch_retry_io(shared_resource);
2938 0 : }
2939 : }
2940 574 : }
2941 :
2942 : static bool
2943 25 : bdev_qos_queue_io(struct spdk_bdev_qos *qos, struct spdk_bdev_io *bdev_io)
2944 : {
2945 : int i;
2946 :
2947 25 : if (bdev_qos_io_to_limit(bdev_io) == true) {
2948 100 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2949 82 : if (!qos->rate_limits[i].queue_io) {
2950 5 : continue;
2951 : }
2952 :
2953 231 : if (qos->rate_limits[i].queue_io(&qos->rate_limits[i],
2954 154 : bdev_io) == true) {
2955 10 : for (i -= 1; i >= 0 ; i--) {
2956 5 : if (!qos->rate_limits[i].queue_io) {
2957 0 : continue;
2958 : }
2959 :
2960 5 : qos->rate_limits[i].rewind_quota(&qos->rate_limits[i], bdev_io);
2961 5 : }
2962 5 : return true;
2963 : }
2964 72 : }
2965 18 : }
2966 :
2967 20 : return false;
2968 25 : }
2969 :
2970 : static int
2971 27 : bdev_qos_io_submit(struct spdk_bdev_channel *ch, struct spdk_bdev_qos *qos)
2972 : {
2973 27 : struct spdk_bdev_io *bdev_io = NULL, *tmp = NULL;
2974 27 : int submitted_ios = 0;
2975 :
2976 52 : TAILQ_FOREACH_SAFE(bdev_io, &ch->qos_queued_io, internal.link, tmp) {
2977 25 : if (!bdev_qos_queue_io(qos, bdev_io)) {
2978 20 : TAILQ_REMOVE(&ch->qos_queued_io, bdev_io, internal.link);
2979 20 : bdev_io_do_submit(ch, bdev_io);
2980 :
2981 20 : submitted_ios++;
2982 20 : }
2983 25 : }
2984 :
2985 27 : return submitted_ios;
2986 : }
2987 :
2988 : static void
2989 2 : bdev_queue_io_wait_with_cb(struct spdk_bdev_io *bdev_io, spdk_bdev_io_wait_cb cb_fn)
2990 : {
2991 : int rc;
2992 :
2993 2 : bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
2994 2 : bdev_io->internal.waitq_entry.cb_fn = cb_fn;
2995 2 : bdev_io->internal.waitq_entry.cb_arg = bdev_io;
2996 4 : rc = spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
2997 2 : &bdev_io->internal.waitq_entry);
2998 2 : if (rc != 0) {
2999 0 : SPDK_ERRLOG("Queue IO failed, rc=%d\n", rc);
3000 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3001 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3002 0 : }
3003 2 : }
3004 :
3005 : static bool
3006 621 : bdev_rw_should_split(struct spdk_bdev_io *bdev_io)
3007 : {
3008 : uint32_t io_boundary;
3009 621 : struct spdk_bdev *bdev = bdev_io->bdev;
3010 621 : uint32_t max_segment_size = bdev->max_segment_size;
3011 621 : uint32_t max_size = bdev->max_rw_size;
3012 621 : int max_segs = bdev->max_num_segments;
3013 :
3014 621 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
3015 24 : io_boundary = bdev->write_unit_size;
3016 621 : } else if (bdev->split_on_optimal_io_boundary) {
3017 168 : io_boundary = bdev->optimal_io_boundary;
3018 168 : } else {
3019 429 : io_boundary = 0;
3020 : }
3021 :
3022 621 : if (spdk_likely(!io_boundary && !max_segs && !max_segment_size && !max_size)) {
3023 243 : return false;
3024 : }
3025 :
3026 378 : if (io_boundary) {
3027 : uint64_t start_stripe, end_stripe;
3028 :
3029 192 : start_stripe = bdev_io->u.bdev.offset_blocks;
3030 192 : end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
3031 : /* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
3032 192 : if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
3033 192 : start_stripe >>= spdk_u32log2(io_boundary);
3034 192 : end_stripe >>= spdk_u32log2(io_boundary);
3035 192 : } else {
3036 0 : start_stripe /= io_boundary;
3037 0 : end_stripe /= io_boundary;
3038 : }
3039 :
3040 192 : if (start_stripe != end_stripe) {
3041 75 : return true;
3042 : }
3043 117 : }
3044 :
3045 303 : if (max_segs) {
3046 150 : if (bdev_io->u.bdev.iovcnt > max_segs) {
3047 15 : return true;
3048 : }
3049 135 : }
3050 :
3051 288 : if (max_segment_size) {
3052 470 : for (int i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
3053 346 : if (bdev_io->u.bdev.iovs[i].iov_len > max_segment_size) {
3054 12 : return true;
3055 : }
3056 334 : }
3057 124 : }
3058 :
3059 276 : if (max_size) {
3060 52 : if (bdev_io->u.bdev.num_blocks > max_size) {
3061 7 : return true;
3062 : }
3063 45 : }
3064 :
3065 269 : return false;
3066 621 : }
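     :
     : /* Worked example (illustrative): with optimal_io_boundary == 8 (a power of
     :  * two), an IO at offset_blocks == 6 with num_blocks == 4 covers blocks 6..9:
     :  * start_stripe = 6 >> 3 = 0 and end_stripe = 9 >> 3 = 1, so the stripes
     :  * differ and the IO must be split at block 8. An IO at offset 8 with
     :  * num_blocks 4 stays entirely within stripe 1 and is not split. */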
3067 :
3068 : static bool
3069 24 : bdev_unmap_should_split(struct spdk_bdev_io *bdev_io)
3070 : {
3071 : uint32_t num_unmap_segments;
3072 :
3073 24 : if (!bdev_io->bdev->max_unmap || !bdev_io->bdev->max_unmap_segments) {
3074 3 : return false;
3075 : }
3076 21 : num_unmap_segments = spdk_divide_round_up(bdev_io->u.bdev.num_blocks, bdev_io->bdev->max_unmap);
3077 21 : if (num_unmap_segments > bdev_io->bdev->max_unmap_segments) {
3078 4 : return true;
3079 : }
3080 :
3081 17 : return false;
3082 24 : }
3083 :
3084 : static bool
3085 37 : bdev_write_zeroes_should_split(struct spdk_bdev_io *bdev_io)
3086 : {
3087 37 : if (!bdev_io->bdev->max_write_zeroes) {
3088 4 : return false;
3089 : }
3090 :
3091 33 : if (bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_write_zeroes) {
3092 10 : return true;
3093 : }
3094 :
3095 23 : return false;
3096 37 : }
3097 :
3098 : static bool
3099 30 : bdev_copy_should_split(struct spdk_bdev_io *bdev_io)
3100 : {
3101 30 : if (bdev_io->bdev->max_copy != 0 &&
3102 25 : bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_copy) {
3103 6 : return true;
3104 : }
3105 :
3106 24 : return false;
3107 30 : }
3108 :
3109 : static bool
3110 794 : bdev_io_should_split(struct spdk_bdev_io *bdev_io)
3111 : {
3112 794 : switch (bdev_io->type) {
3113 : case SPDK_BDEV_IO_TYPE_READ:
3114 : case SPDK_BDEV_IO_TYPE_WRITE:
3115 621 : return bdev_rw_should_split(bdev_io);
3116 : case SPDK_BDEV_IO_TYPE_UNMAP:
3117 24 : return bdev_unmap_should_split(bdev_io);
3118 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3119 37 : return bdev_write_zeroes_should_split(bdev_io);
3120 : case SPDK_BDEV_IO_TYPE_COPY:
3121 30 : return bdev_copy_should_split(bdev_io);
3122 : default:
3123 82 : return false;
3124 : }
3125 794 : }
3126 :
3127 : static uint32_t
3128 249 : _to_next_boundary(uint64_t offset, uint32_t boundary)
3129 : {
3130 249 : return (boundary - (offset % boundary));
3131 : }
3132 :
3133 : static void bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
3134 :
3135 : static void _bdev_rw_split(void *_bdev_io);
3136 :
3137 : static void bdev_unmap_split(struct spdk_bdev_io *bdev_io);
3138 :
3139 : static void
3140 0 : _bdev_unmap_split(void *_bdev_io)
3141 : {
3142 0 : return bdev_unmap_split((struct spdk_bdev_io *)_bdev_io);
3143 : }
3144 :
3145 : static void bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io);
3146 :
3147 : static void
3148 0 : _bdev_write_zeroes_split(void *_bdev_io)
3149 : {
3150 0 : return bdev_write_zeroes_split((struct spdk_bdev_io *)_bdev_io);
3151 : }
3152 :
3153 : static void bdev_copy_split(struct spdk_bdev_io *bdev_io);
3154 :
3155 : static void
3156 0 : _bdev_copy_split(void *_bdev_io)
3157 : {
3158 0 : return bdev_copy_split((struct spdk_bdev_io *)_bdev_io);
3159 : }
3160 :
3161 : static int
3162 305 : bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt, void *md_buf,
3163 : uint64_t num_blocks, uint64_t *offset, uint64_t *remaining)
3164 : {
3165 : int rc;
3166 : uint64_t current_offset, current_remaining, current_src_offset;
3167 : spdk_bdev_io_wait_cb io_wait_fn;
3168 :
3169 305 : current_offset = *offset;
3170 305 : current_remaining = *remaining;
3171 :
3172 305 : assert(bdev_io->internal.f.split);
3173 :
3174 305 : bdev_io->internal.split.outstanding++;
3175 :
3176 305 : io_wait_fn = _bdev_rw_split;
3177 305 : switch (bdev_io->type) {
3178 : case SPDK_BDEV_IO_TYPE_READ:
3179 196 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3180 392 : rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
3181 196 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3182 196 : iov, iovcnt, md_buf, current_offset,
3183 196 : num_blocks,
3184 196 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
3185 196 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
3186 : NULL,
3187 196 : bdev_io->u.bdev.dif_check_flags,
3188 196 : bdev_io_split_done, bdev_io);
3189 196 : break;
3190 : case SPDK_BDEV_IO_TYPE_WRITE:
3191 50 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3192 100 : rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
3193 50 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3194 50 : iov, iovcnt, md_buf, current_offset,
3195 50 : num_blocks,
3196 50 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
3197 50 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
3198 : NULL,
3199 50 : bdev_io->u.bdev.dif_check_flags,
3200 50 : bdev_io->u.bdev.nvme_cdw12.raw,
3201 50 : bdev_io->u.bdev.nvme_cdw13.raw,
3202 50 : bdev_io_split_done, bdev_io);
3203 50 : break;
3204 : case SPDK_BDEV_IO_TYPE_UNMAP:
3205 17 : io_wait_fn = _bdev_unmap_split;
3206 34 : rc = spdk_bdev_unmap_blocks(bdev_io->internal.desc,
3207 17 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3208 17 : current_offset, num_blocks,
3209 17 : bdev_io_split_done, bdev_io);
3210 17 : break;
3211 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3212 23 : io_wait_fn = _bdev_write_zeroes_split;
3213 46 : rc = spdk_bdev_write_zeroes_blocks(bdev_io->internal.desc,
3214 23 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3215 23 : current_offset, num_blocks,
3216 23 : bdev_io_split_done, bdev_io);
3217 23 : break;
3218 : case SPDK_BDEV_IO_TYPE_COPY:
3219 19 : io_wait_fn = _bdev_copy_split;
3220 38 : current_src_offset = bdev_io->u.bdev.copy.src_offset_blocks +
3221 19 : (current_offset - bdev_io->u.bdev.offset_blocks);
3222 38 : rc = spdk_bdev_copy_blocks(bdev_io->internal.desc,
3223 19 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3224 19 : current_offset, current_src_offset, num_blocks,
3225 19 : bdev_io_split_done, bdev_io);
3226 19 : break;
3227 : default:
3228 0 : assert(false);
3229 : rc = -EINVAL;
3230 : break;
3231 : }
3232 :
3233 305 : if (rc == 0) {
3234 301 : current_offset += num_blocks;
3235 301 : current_remaining -= num_blocks;
3236 301 : bdev_io->internal.split.current_offset_blocks = current_offset;
3237 301 : bdev_io->internal.split.remaining_num_blocks = current_remaining;
3238 301 : *offset = current_offset;
3239 301 : *remaining = current_remaining;
3240 301 : } else {
3241 4 : bdev_io->internal.split.outstanding--;
3242 4 : if (rc == -ENOMEM) {
3243 4 : if (bdev_io->internal.split.outstanding == 0) {
3244 : /* No I/O is outstanding. Hence we should wait here. */
3245 1 : bdev_queue_io_wait_with_cb(bdev_io, io_wait_fn);
3246 1 : }
3247 4 : } else {
3248 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3249 0 : if (bdev_io->internal.split.outstanding == 0) {
3250 0 : bdev_ch_remove_from_io_submitted(bdev_io);
3251 0 : spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3252 : 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3253 : bdev_io->internal.ch->queue_depth);
3254 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3255 0 : }
3256 : }
3257 : }
3258 :
3259 305 : return rc;
3260 : }
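/* Illustrative sketch (annotation, not part of the build): bdev_io_split_submit()
 * treats *offset and *remaining as in/out cursors, which lets every per-type
 * split loop below follow the same shape. Assuming a hypothetical per-child
 * limit max_child_blocks, the caller pattern is:
 *
 *	while (remaining && num_children_reqs < MAX_CHILDREN) {
 *		child_blocks = spdk_min(remaining, max_child_blocks);
 *		rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, child_blocks,
 *					  &offset, &remaining);
 *		if (rc != 0) {
 *			return;	/* -ENOMEM queues a retry via io_wait_fn; other
 *				 * errors fail the parent once nothing is outstanding */
 *		}
 *		num_children_reqs++;
 *	}
 */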
3261 :
3262 : static void
3263 67 : _bdev_rw_split(void *_bdev_io)
3264 : {
3265 : struct iovec *parent_iov, *iov;
3266 67 : struct spdk_bdev_io *bdev_io = _bdev_io;
3267 67 : struct spdk_bdev *bdev = bdev_io->bdev;
3268 : uint64_t parent_offset, current_offset, remaining;
3269 : uint32_t parent_iov_offset, parent_iovcnt, parent_iovpos, child_iovcnt;
3270 : uint32_t to_next_boundary, to_next_boundary_bytes, to_last_block_bytes;
3271 : uint32_t iovcnt, iov_len, child_iovsize;
3272 : uint32_t blocklen;
3273 : uint32_t io_boundary;
3274 67 : uint32_t max_segment_size = bdev->max_segment_size;
3275 67 : uint32_t max_child_iovcnt = bdev->max_num_segments;
3276 67 : uint32_t max_size = bdev->max_rw_size;
3277 67 : void *md_buf = NULL;
3278 : int rc;
3279 :
3280 67 : blocklen = bdev_io_get_block_size(bdev_io);
3281 :
3282 67 : max_size = max_size ? max_size : UINT32_MAX;
3283 67 : max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
3284 67 : max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, SPDK_BDEV_IO_NUM_CHILD_IOV) :
3285 : SPDK_BDEV_IO_NUM_CHILD_IOV;
3286 :
3287 67 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
3288 5 : io_boundary = bdev->write_unit_size;
3289 67 : } else if (bdev->split_on_optimal_io_boundary) {
3290 40 : io_boundary = bdev->optimal_io_boundary;
3291 40 : } else {
3292 22 : io_boundary = UINT32_MAX;
3293 : }
3294 :
3295 67 : assert(bdev_io->internal.f.split);
3296 :
3297 67 : remaining = bdev_io->internal.split.remaining_num_blocks;
3298 67 : current_offset = bdev_io->internal.split.current_offset_blocks;
3299 67 : parent_offset = bdev_io->u.bdev.offset_blocks;
3300 67 : parent_iov_offset = (current_offset - parent_offset) * blocklen;
3301 67 : parent_iovcnt = bdev_io->u.bdev.iovcnt;
3302 :
3303 420 : for (parent_iovpos = 0; parent_iovpos < parent_iovcnt; parent_iovpos++) {
3304 420 : parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3305 420 : if (parent_iov_offset < parent_iov->iov_len) {
3306 67 : break;
3307 : }
3308 353 : parent_iov_offset -= parent_iov->iov_len;
3309 353 : }
3310 :
3311 67 : child_iovcnt = 0;
3312 573 : while (remaining > 0 && parent_iovpos < parent_iovcnt &&
3313 264 : child_iovcnt < SPDK_BDEV_IO_NUM_CHILD_IOV) {
3314 249 : to_next_boundary = _to_next_boundary(current_offset, io_boundary);
3315 249 : to_next_boundary = spdk_min(remaining, to_next_boundary);
3316 249 : to_next_boundary = spdk_min(max_size, to_next_boundary);
3317 249 : to_next_boundary_bytes = to_next_boundary * blocklen;
3318 :
3319 249 : iov = &bdev_io->child_iov[child_iovcnt];
3320 249 : iovcnt = 0;
3321 :
3322 249 : if (bdev_io->u.bdev.md_buf) {
3323 48 : md_buf = (char *)bdev_io->u.bdev.md_buf +
3324 24 : (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
3325 24 : }
3326 :
3327 249 : child_iovsize = spdk_min(SPDK_BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
3328 1810 : while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt &&
3329 836 : iovcnt < child_iovsize) {
3330 725 : parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3331 725 : iov_len = parent_iov->iov_len - parent_iov_offset;
3332 :
3333 725 : iov_len = spdk_min(iov_len, max_segment_size);
3334 725 : iov_len = spdk_min(iov_len, to_next_boundary_bytes);
3335 725 : to_next_boundary_bytes -= iov_len;
3336 :
3337 725 : bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
3338 725 : bdev_io->child_iov[child_iovcnt].iov_len = iov_len;
3339 :
3340 725 : if (iov_len < parent_iov->iov_len - parent_iov_offset) {
3341 183 : parent_iov_offset += iov_len;
3342 183 : } else {
3343 542 : parent_iovpos++;
3344 542 : parent_iov_offset = 0;
3345 : }
3346 725 : child_iovcnt++;
3347 725 : iovcnt++;
3348 : }
3349 :
3350 249 : if (to_next_boundary_bytes > 0) {
3351 : /* We had to stop this child I/O early because we ran out of
3352 : * child_iov space or were limited by max_num_segments.
3353 : 			 * Ensure the iovs are aligned to the block size, and
3354 : 			 * then adjust to_next_boundary before starting the
3355 : 			 * child I/O.
3356 : */
3357 111 : assert(child_iovcnt == SPDK_BDEV_IO_NUM_CHILD_IOV ||
3358 : iovcnt == child_iovsize);
3359 111 : to_last_block_bytes = to_next_boundary_bytes % blocklen;
3360 111 : if (to_last_block_bytes != 0) {
3361 24 : uint32_t child_iovpos = child_iovcnt - 1;
3362 : 			/* don't decrease child_iovcnt when it equals SPDK_BDEV_IO_NUM_CHILD_IOV,
3363 : 			 * so the loop ends naturally
3364 : */
3365 :
3366 24 : to_last_block_bytes = blocklen - to_last_block_bytes;
3367 24 : to_next_boundary_bytes += to_last_block_bytes;
3368 53 : while (to_last_block_bytes > 0 && iovcnt > 0) {
3369 32 : iov_len = spdk_min(to_last_block_bytes,
3370 : bdev_io->child_iov[child_iovpos].iov_len);
3371 32 : bdev_io->child_iov[child_iovpos].iov_len -= iov_len;
3372 32 : if (bdev_io->child_iov[child_iovpos].iov_len == 0) {
3373 15 : child_iovpos--;
3374 15 : if (--iovcnt == 0) {
3375 : 						/* If the child I/O covers less than a full block, just return.
3376 : 						 * If the first child I/O of any split round is smaller than
3377 : 						 * a block, exit with an error.
3378 : */
3379 3 : if (bdev_io->internal.split.outstanding == 0) {
3380 1 : SPDK_ERRLOG("The first child io was less than a block size\n");
3381 1 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3382 1 : bdev_ch_remove_from_io_submitted(bdev_io);
3383 1 : spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3384 : 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3385 : bdev_io->internal.ch->queue_depth);
3386 1 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3387 1 : }
3388 :
3389 3 : return;
3390 : }
3391 12 : }
3392 :
3393 29 : to_last_block_bytes -= iov_len;
3394 :
3395 29 : if (parent_iov_offset == 0) {
3396 14 : parent_iovpos--;
3397 14 : parent_iov_offset = bdev_io->u.bdev.iovs[parent_iovpos].iov_len;
3398 14 : }
3399 29 : parent_iov_offset -= iov_len;
3400 : }
3401 :
3402 21 : assert(to_last_block_bytes == 0);
3403 21 : }
3404 108 : to_next_boundary -= to_next_boundary_bytes / blocklen;
3405 108 : }
3406 :
3407 246 : rc = bdev_io_split_submit(bdev_io, iov, iovcnt, md_buf, to_next_boundary,
3408 : ¤t_offset, &remaining);
3409 246 : if (spdk_unlikely(rc)) {
3410 4 : return;
3411 : }
3412 : }
3413 67 : }
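/* Worked example (illustrative): with optimal_io_boundary = 8 blocks,
 * current_offset = 5 and remaining = 10, the distance to the next boundary is
 * io_boundary - (current_offset % io_boundary) = 8 - 5 = 3, so the first child
 * I/O covers blocks 5..7 and the next round starts aligned at block 8 with
 * 7 blocks left. max_rw_size, max_segment_size and max_num_segments can each
 * shrink a child further, which the block-alignment fixup above compensates for.
 */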
3414 :
3415 : static void
3416 3 : bdev_unmap_split(struct spdk_bdev_io *bdev_io)
3417 : {
3418 : uint64_t offset, unmap_blocks, remaining, max_unmap_blocks;
3419 3 : uint32_t num_children_reqs = 0;
3420 : int rc;
3421 :
3422 3 : assert(bdev_io->internal.f.split);
3423 :
3424 3 : offset = bdev_io->internal.split.current_offset_blocks;
3425 3 : remaining = bdev_io->internal.split.remaining_num_blocks;
3426 3 : max_unmap_blocks = bdev_io->bdev->max_unmap * bdev_io->bdev->max_unmap_segments;
3427 :
3428 20 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3429 17 : unmap_blocks = spdk_min(remaining, max_unmap_blocks);
3430 :
3431 17 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, unmap_blocks,
3432 : &offset, &remaining);
3433 17 : if (spdk_likely(rc == 0)) {
3434 17 : num_children_reqs++;
3435 17 : } else {
3436 0 : return;
3437 : }
3438 : }
3439 3 : }
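/* Example arithmetic (illustrative): if the bdev reports max_unmap = 4096
 * blocks per segment and max_unmap_segments = 32, each child unmap may cover
 * up to 4096 * 32 = 131072 blocks, with at most
 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS children kept outstanding
 * per round.
 */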
3440 :
3441 : static void
3442 6 : bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io)
3443 : {
3444 : uint64_t offset, write_zeroes_blocks, remaining;
3445 6 : uint32_t num_children_reqs = 0;
3446 : int rc;
3447 :
3448 6 : assert(bdev_io->internal.f.split);
3449 :
3450 6 : offset = bdev_io->internal.split.current_offset_blocks;
3451 6 : remaining = bdev_io->internal.split.remaining_num_blocks;
3452 :
3453 29 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3454 23 : write_zeroes_blocks = spdk_min(remaining, bdev_io->bdev->max_write_zeroes);
3455 :
3456 23 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, write_zeroes_blocks,
3457 : &offset, &remaining);
3458 23 : if (spdk_likely(rc == 0)) {
3459 23 : num_children_reqs++;
3460 23 : } else {
3461 0 : return;
3462 : }
3463 : }
3464 6 : }
3465 :
3466 : static void
3467 4 : bdev_copy_split(struct spdk_bdev_io *bdev_io)
3468 : {
3469 : uint64_t offset, copy_blocks, remaining;
3470 4 : uint32_t num_children_reqs = 0;
3471 : int rc;
3472 :
3473 4 : assert(bdev_io->internal.f.split);
3474 :
3475 4 : offset = bdev_io->internal.split.current_offset_blocks;
3476 4 : remaining = bdev_io->internal.split.remaining_num_blocks;
3477 :
3478 4 : assert(bdev_io->bdev->max_copy != 0);
3479 23 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_COPY_REQS)) {
3480 19 : copy_blocks = spdk_min(remaining, bdev_io->bdev->max_copy);
3481 :
3482 19 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, copy_blocks,
3483 : &offset, &remaining);
3484 19 : if (spdk_likely(rc == 0)) {
3485 19 : num_children_reqs++;
3486 19 : } else {
3487 0 : return;
3488 : }
3489 : }
3490 4 : }
3491 :
3492 : static void
3493 58 : parent_bdev_io_complete(void *ctx, int rc)
3494 : {
3495 58 : struct spdk_bdev_io *parent_io = ctx;
3496 :
3497 58 : if (rc) {
3498 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3499 0 : }
3500 :
3501 116 : parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
3502 58 : parent_io->internal.caller_ctx);
3503 58 : }
3504 :
3505 : static void
3506 0 : bdev_io_complete_parent_sequence_cb(void *ctx, int status)
3507 : {
3508 0 : struct spdk_bdev_io *bdev_io = ctx;
3509 :
3510 : /* u.bdev.accel_sequence should have already been cleared at this point */
3511 0 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3512 0 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3513 0 : bdev_io->internal.f.has_accel_sequence = false;
3514 :
3515 0 : if (spdk_unlikely(status != 0)) {
3516 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
3517 0 : }
3518 :
3519 0 : parent_bdev_io_complete(bdev_io, status);
3520 0 : }
3521 :
3522 : static void
3523 301 : bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
3524 : {
3525 301 : struct spdk_bdev_io *parent_io = cb_arg;
3526 :
3527 301 : spdk_bdev_free_io(bdev_io);
3528 :
3529 301 : assert(parent_io->internal.f.split);
3530 :
3531 301 : if (!success) {
3532 21 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3533 : /* If any child I/O failed, stop further splitting process. */
3534 21 : parent_io->internal.split.current_offset_blocks += parent_io->internal.split.remaining_num_blocks;
3535 21 : parent_io->internal.split.remaining_num_blocks = 0;
3536 21 : }
3537 301 : parent_io->internal.split.outstanding--;
3538 301 : if (parent_io->internal.split.outstanding != 0) {
3539 223 : return;
3540 : }
3541 :
3542 : /*
3543 : * Parent I/O finishes when all blocks are consumed.
3544 : */
3545 78 : if (parent_io->internal.split.remaining_num_blocks == 0) {
3546 58 : assert(parent_io->internal.cb != bdev_io_split_done);
3547 58 : bdev_ch_remove_from_io_submitted(parent_io);
3548 58 : spdk_trace_record(TRACE_BDEV_IO_DONE, parent_io->internal.ch->trace_id,
3549 : 0, (uintptr_t)parent_io, bdev_io->internal.caller_ctx,
3550 : parent_io->internal.ch->queue_depth);
3551 :
3552 58 : if (spdk_likely(parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
3553 48 : if (bdev_io_needs_sequence_exec(parent_io->internal.desc, parent_io)) {
3554 0 : bdev_io_exec_sequence(parent_io, bdev_io_complete_parent_sequence_cb);
3555 0 : return;
3556 48 : } else if (parent_io->internal.f.has_bounce_buf &&
3557 0 : !bdev_io_use_accel_sequence(bdev_io)) {
3558 : /* bdev IO will be completed in the callback */
3559 0 : _bdev_io_push_bounce_data_buffer(parent_io, parent_bdev_io_complete);
3560 0 : return;
3561 : }
3562 48 : }
3563 :
3564 58 : parent_bdev_io_complete(parent_io, 0);
3565 58 : return;
3566 : }
3567 :
3568 : /*
3569 : * Continue with the splitting process. This function will complete the parent I/O if the
3570 : * splitting is done.
3571 : */
3572 20 : switch (parent_io->type) {
3573 : case SPDK_BDEV_IO_TYPE_READ:
3574 : case SPDK_BDEV_IO_TYPE_WRITE:
3575 17 : _bdev_rw_split(parent_io);
3576 17 : break;
3577 : case SPDK_BDEV_IO_TYPE_UNMAP:
3578 1 : bdev_unmap_split(parent_io);
3579 1 : break;
3580 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3581 1 : bdev_write_zeroes_split(parent_io);
3582 1 : break;
3583 : case SPDK_BDEV_IO_TYPE_COPY:
3584 1 : bdev_copy_split(parent_io);
3585 1 : break;
3586 : default:
3587 0 : assert(false);
3588 : break;
3589 : }
3590 301 : }
3591 :
3592 : static void bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
3593 : bool success);
3594 :
3595 : static void
3596 59 : bdev_io_split(struct spdk_bdev_io *bdev_io)
3597 : {
3598 59 : assert(bdev_io_should_split(bdev_io));
3599 59 : assert(bdev_io->internal.f.split);
3600 :
3601 59 : bdev_io->internal.split.current_offset_blocks = bdev_io->u.bdev.offset_blocks;
3602 59 : bdev_io->internal.split.remaining_num_blocks = bdev_io->u.bdev.num_blocks;
3603 59 : bdev_io->internal.split.outstanding = 0;
3604 59 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
3605 :
3606 59 : switch (bdev_io->type) {
3607 : case SPDK_BDEV_IO_TYPE_READ:
3608 : case SPDK_BDEV_IO_TYPE_WRITE:
3609 49 : if (_is_buf_allocated(bdev_io->u.bdev.iovs)) {
3610 49 : _bdev_rw_split(bdev_io);
3611 49 : } else {
3612 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3613 0 : spdk_bdev_io_get_buf(bdev_io, bdev_rw_split_get_buf_cb,
3614 0 : bdev_io->u.bdev.num_blocks * bdev_io_get_block_size(bdev_io));
3615 : }
3616 49 : break;
3617 : case SPDK_BDEV_IO_TYPE_UNMAP:
3618 2 : bdev_unmap_split(bdev_io);
3619 2 : break;
3620 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3621 5 : bdev_write_zeroes_split(bdev_io);
3622 5 : break;
3623 : case SPDK_BDEV_IO_TYPE_COPY:
3624 3 : bdev_copy_split(bdev_io);
3625 3 : break;
3626 : default:
3627 0 : assert(false);
3628 : break;
3629 : }
3630 59 : }
3631 :
3632 : static void
3633 0 : bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
3634 : {
3635 0 : if (!success) {
3636 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3637 0 : return;
3638 : }
3639 :
3640 0 : _bdev_rw_split(bdev_io);
3641 0 : }
3642 :
3643 : static inline void
3644 579 : _bdev_io_submit(struct spdk_bdev_io *bdev_io)
3645 : {
3646 579 : struct spdk_bdev *bdev = bdev_io->bdev;
3647 579 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3648 :
3649 579 : if (spdk_likely(bdev_ch->flags == 0)) {
3650 554 : bdev_io_do_submit(bdev_ch, bdev_io);
3651 554 : return;
3652 : }
3653 :
3654 25 : if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
3655 2 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
3656 25 : } else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
3657 23 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
3658 2 : bdev_abort_queued_io(&bdev_ch->qos_queued_io, bdev_io->u.abort.bio_to_abort)) {
3659 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
3660 0 : } else {
3661 23 : TAILQ_INSERT_TAIL(&bdev_ch->qos_queued_io, bdev_io, internal.link);
3662 23 : bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
3663 : }
3664 23 : } else {
3665 0 : SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
3666 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3667 : }
3668 579 : }
3669 :
3670 : bool bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2);
3671 :
3672 : bool
3673 23 : bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2)
3674 : {
3675 23 : if (range1->length == 0 || range2->length == 0) {
3676 1 : return false;
3677 : }
3678 :
3679 22 : if (range1->offset + range1->length <= range2->offset) {
3680 1 : return false;
3681 : }
3682 :
3683 21 : if (range2->offset + range2->length <= range1->offset) {
3684 3 : return false;
3685 : }
3686 :
3687 18 : return true;
3688 23 : }
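/* Example (illustrative): range1 = {offset = 0, length = 8} and
 * range2 = {offset = 8, length = 8} do not overlap, since
 * range1->offset + range1->length <= range2->offset. Moving range2 back to
 * offset 7 would make the ranges share block 7 and the function return true.
 */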
3689 :
3690 : static bool
3691 11 : bdev_io_range_is_locked(struct spdk_bdev_io *bdev_io, struct lba_range *range)
3692 : {
3693 11 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3694 : struct lba_range r;
3695 :
3696 11 : switch (bdev_io->type) {
3697 : case SPDK_BDEV_IO_TYPE_NVME_IO:
3698 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
3699 : /* Don't try to decode the NVMe command - just assume worst-case and that
3700 : * it overlaps a locked range.
3701 : */
3702 0 : return true;
3703 : case SPDK_BDEV_IO_TYPE_READ:
3704 6 : if (!range->quiesce) {
3705 4 : return false;
3706 : }
3707 : /* fallthrough */
3708 : case SPDK_BDEV_IO_TYPE_WRITE:
3709 : case SPDK_BDEV_IO_TYPE_UNMAP:
3710 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3711 : case SPDK_BDEV_IO_TYPE_ZCOPY:
3712 : case SPDK_BDEV_IO_TYPE_COPY:
3713 7 : r.offset = bdev_io->u.bdev.offset_blocks;
3714 7 : r.length = bdev_io->u.bdev.num_blocks;
3715 7 : if (!bdev_lba_range_overlapped(range, &r)) {
3716 : /* This I/O doesn't overlap the specified LBA range. */
3717 0 : return false;
3718 7 : } else if (range->owner_ch == ch && range->locked_ctx == bdev_io->internal.caller_ctx) {
3719 : /* This I/O overlaps, but the I/O is on the same channel that locked this
3720 : * range, and the caller_ctx is the same as the locked_ctx. This means
3721 : * that this I/O is associated with the lock, and is allowed to execute.
3722 : */
3723 2 : return false;
3724 : } else {
3725 5 : return true;
3726 : }
3727 : default:
3728 0 : return false;
3729 : }
3730 11 : }
3731 :
3732 : void
3733 639 : bdev_io_submit(struct spdk_bdev_io *bdev_io)
3734 : {
3735 639 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3736 :
3737 639 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3738 :
3739 639 : if (!TAILQ_EMPTY(&ch->locked_ranges)) {
3740 : struct lba_range *range;
3741 :
3742 13 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
3743 8 : if (bdev_io_range_is_locked(bdev_io, range)) {
3744 3 : TAILQ_INSERT_TAIL(&ch->io_locked, bdev_io, internal.ch_link);
3745 3 : return;
3746 : }
3747 5 : }
3748 5 : }
3749 :
3750 636 : bdev_ch_add_to_io_submitted(bdev_io);
3751 :
3752 636 : bdev_io->internal.submit_tsc = spdk_get_ticks();
3753 636 : spdk_trace_record_tsc(bdev_io->internal.submit_tsc, TRACE_BDEV_IO_START,
3754 : ch->trace_id, bdev_io->u.bdev.num_blocks,
3755 : (uintptr_t)bdev_io, (uint64_t)bdev_io->type, bdev_io->internal.caller_ctx,
3756 : bdev_io->u.bdev.offset_blocks, ch->queue_depth);
3757 :
3758 636 : if (bdev_io->internal.f.split) {
3759 59 : bdev_io_split(bdev_io);
3760 59 : return;
3761 : }
3762 :
3763 577 : _bdev_io_submit(bdev_io);
3764 639 : }
3765 :
3766 : static inline int
3767 2 : bdev_io_init_dif_ctx(struct spdk_bdev_io *bdev_io)
3768 : {
3769 2 : struct spdk_bdev *bdev = bdev_io->bdev;
3770 : struct spdk_dif_ctx_init_ext_opts dif_opts;
3771 :
3772 2 : memset(&bdev_io->u.bdev.dif_err, 0, sizeof(struct spdk_dif_error));
3773 :
3774 2 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
3775 2 : dif_opts.dif_pi_format = bdev->dif_pi_format;
3776 :
3777 4 : return spdk_dif_ctx_init(&bdev_io->u.bdev.dif_ctx,
3778 2 : bdev->blocklen,
3779 2 : bdev->md_len,
3780 2 : bdev->md_interleave,
3781 2 : bdev->dif_is_head_of_md,
3782 2 : bdev->dif_type,
3783 2 : bdev_io->u.bdev.dif_check_flags,
3784 2 : bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
3785 : 0xFFFF, 0, 0, 0, &dif_opts);
3786 : }
3787 :
3788 : static void
3789 4 : _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
3790 : bool success)
3791 : {
3792 4 : if (!success) {
3793 0 : SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
3794 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3795 0 : bdev_io_complete_unsubmitted(bdev_io);
3796 0 : return;
3797 : }
3798 :
3799 4 : if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
3800 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
3801 0 : bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
3802 0 : return;
3803 : }
3804 : /* For reads we'll execute the sequence after the data is read, so, for now, only
3805 : 	 * clear out the accel_sequence pointer and submit the IO */
3806 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3807 0 : bdev_io->u.bdev.accel_sequence = NULL;
3808 0 : }
3809 :
3810 4 : bdev_io_submit(bdev_io);
3811 4 : }
3812 :
3813 : static inline void
3814 4 : _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
3815 : {
3816 : 	/* The bdev doesn't support memory domains, so the buffers in this IO request can't
3817 : 	 * be accessed directly. Buffers need to be allocated before issuing the IO operation.
3818 : 	 * For a write operation we need to pull the buffers from the memory domain before submitting the IO.
3819 : 	 * Once a read operation completes, we need to use the memory_domain push functionality to
3820 : 	 * update the data in the original memory domain IO buffer.
3821 : 	 *
3822 : 	 * If this I/O request is not aware of metadata, the buffers in this IO request can't be
3823 : 	 * accessed directly either. Buffers need to be allocated before issuing the IO operation.
3824 : 	 * For a write operation we need to insert metadata before submitting the IO. Once a read
3825 : 	 * operation completes, we need to strip the metadata from the original IO buffer.
3826 : 	 *
3827 : 	 * This IO request will go through the regular IO flow, so clear the memory domain pointers. */
3828 4 : assert(bdev_io_use_memory_domain(bdev_io) ||
3829 : bdev_io_needs_metadata(bdev_io->internal.desc, bdev_io));
3830 :
3831 4 : bdev_io->u.bdev.memory_domain = NULL;
3832 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
3833 8 : _bdev_io_get_bounce_buf(bdev_io, _bdev_memory_domain_get_io_cb,
3834 4 : bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3835 4 : }
3836 :
3837 : static inline void
3838 0 : _bdev_io_ext_use_accel_buffer(struct spdk_bdev_io *bdev_io)
3839 : {
3840 0 : assert(bdev_io_use_memory_domain(bdev_io));
3841 0 : assert(bdev_io_needs_metadata(bdev_io->internal.desc, bdev_io));
3842 :
3843 0 : bdev_io->u.bdev.memory_domain = NULL;
3844 0 : bdev_io->u.bdev.memory_domain_ctx = NULL;
3845 0 : bdev_io_get_accel_buf(bdev_io, _bdev_memory_domain_get_io_cb,
3846 0 : bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3847 0 : }
3848 :
3849 : /* We need to allocate a bounce buffer
3850 :  * - if the bdev doesn't support memory domains,
3851 :  * - if it does support them, but we need to execute an accel sequence and the data buffer is
3852 :  *   from the accel memory domain (to avoid doing a push/pull from that domain), or
3853 :  * - if the IO is not aware of metadata.
3854 : */
3855 : static inline bool
3856 292 : bdev_io_needs_bounce_buffer(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
3857 : {
3858 292 : if (bdev_io_use_memory_domain(bdev_io)) {
3859 4 : if (!desc->memory_domains_supported ||
3860 0 : (bdev_io_needs_sequence_exec(desc, bdev_io) &&
3861 0 : (bdev_io->internal.memory_domain == spdk_accel_get_memory_domain() ||
3862 0 : bdev_io_needs_metadata(desc, bdev_io)))) {
3863 4 : return true;
3864 : }
3865 :
3866 0 : return false;
3867 : }
3868 :
3869 288 : if (bdev_io_needs_metadata(desc, bdev_io)) {
3870 0 : return true;
3871 : }
3872 :
3873 288 : return false;
3874 292 : }
3875 :
3876 : /* We need to allocate a fake accel buffer if the bdev supports memory domains but the IO is
3877 :  * not aware of metadata.
3878 : */
3879 : static inline bool
3880 288 : bdev_io_needs_accel_buffer(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
3881 : {
3882 288 : if (bdev_io_needs_metadata(desc, bdev_io)) {
3883 0 : assert(bdev_io_use_memory_domain(bdev_io));
3884 0 : return true;
3885 : }
3886 :
3887 288 : return false;
3888 288 : }
3889 :
3890 : static inline void
3891 292 : _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
3892 : {
3893 292 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3894 : int rc;
3895 :
3896 292 : if (spdk_unlikely(ch->flags & BDEV_CH_RESET_IN_PROGRESS)) {
3897 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
3898 0 : bdev_io_complete_unsubmitted(bdev_io);
3899 0 : return;
3900 : }
3901 :
3902 292 : if (bdev_io_needs_metadata(desc, bdev_io)) {
3903 0 : rc = bdev_io_init_dif_ctx(bdev_io);
3904 0 : if (spdk_unlikely(rc != 0)) {
3905 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3906 0 : bdev_io_complete_unsubmitted(bdev_io);
3907 0 : return;
3908 : }
3909 0 : }
3910 :
3911 292 : if (bdev_io_needs_bounce_buffer(desc, bdev_io)) {
3912 4 : _bdev_io_ext_use_bounce_buffer(bdev_io);
3913 4 : return;
3914 : }
3915 :
3916 288 : if (bdev_io_needs_accel_buffer(desc, bdev_io)) {
3917 0 : _bdev_io_ext_use_accel_buffer(bdev_io);
3918 0 : return;
3919 : }
3920 :
3921 288 : if (bdev_io_needs_sequence_exec(desc, bdev_io)) {
3922 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
3923 0 : bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
3924 0 : return;
3925 : }
3926 : /* For reads we'll execute the sequence after the data is read, so, for now, only
3927 : 		 * clear out the accel_sequence pointer and submit the IO */
3928 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3929 0 : bdev_io->u.bdev.accel_sequence = NULL;
3930 0 : }
3931 :
3932 288 : bdev_io_submit(bdev_io);
3933 292 : }
3934 :
3935 : static void
3936 12 : bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
3937 : {
3938 12 : struct spdk_bdev *bdev = bdev_io->bdev;
3939 12 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3940 12 : struct spdk_io_channel *ch = bdev_ch->channel;
3941 :
3942 12 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3943 :
3944 12 : bdev_io->internal.f.in_submit_request = true;
3945 12 : bdev_submit_request(bdev, ch, bdev_io);
3946 12 : bdev_io->internal.f.in_submit_request = false;
3947 12 : }
3948 :
3949 : void
3950 693 : bdev_io_init(struct spdk_bdev_io *bdev_io,
3951 : struct spdk_bdev *bdev, void *cb_arg,
3952 : spdk_bdev_io_completion_cb cb)
3953 : {
3954 693 : bdev_io->bdev = bdev;
3955 693 : bdev_io->internal.f.raw = 0;
3956 693 : bdev_io->internal.caller_ctx = cb_arg;
3957 693 : bdev_io->internal.cb = cb;
3958 693 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
3959 693 : bdev_io->internal.f.in_submit_request = false;
3960 693 : bdev_io->internal.error.nvme.cdw0 = 0;
3961 693 : bdev_io->num_retries = 0;
3962 693 : bdev_io->internal.get_buf_cb = NULL;
3963 693 : bdev_io->internal.get_aux_buf_cb = NULL;
3964 693 : bdev_io->internal.data_transfer_cpl = NULL;
3965 693 : bdev_io->internal.f.split = bdev_io_should_split(bdev_io);
3966 693 : }
3967 :
3968 : static bool
3969 540 : bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3970 : {
3971 540 : return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
3972 : }
3973 :
3974 : bool
3975 178 : spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3976 : {
3977 : bool supported;
3978 :
3979 178 : supported = bdev_io_type_supported(bdev, io_type);
3980 :
3981 178 : if (!supported) {
3982 7 : switch (io_type) {
3983 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3984 : /* The bdev layer will emulate write zeroes as long as write is supported. */
3985 0 : supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
3986 0 : break;
3987 : default:
3988 7 : break;
3989 : }
3990 7 : }
3991 :
3992 178 : return supported;
3993 : }
3994 :
3995 : static const char *g_io_type_strings[] = {
3996 : [SPDK_BDEV_IO_TYPE_READ] = "read",
3997 : [SPDK_BDEV_IO_TYPE_WRITE] = "write",
3998 : [SPDK_BDEV_IO_TYPE_UNMAP] = "unmap",
3999 : [SPDK_BDEV_IO_TYPE_FLUSH] = "flush",
4000 : [SPDK_BDEV_IO_TYPE_RESET] = "reset",
4001 : [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = "nvme_admin",
4002 : [SPDK_BDEV_IO_TYPE_NVME_IO] = "nvme_io",
4003 : [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = "nvme_io_md",
4004 : [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = "write_zeroes",
4005 : [SPDK_BDEV_IO_TYPE_ZCOPY] = "zcopy",
4006 : [SPDK_BDEV_IO_TYPE_GET_ZONE_INFO] = "get_zone_info",
4007 : [SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT] = "zone_management",
4008 : [SPDK_BDEV_IO_TYPE_ZONE_APPEND] = "zone_append",
4009 : [SPDK_BDEV_IO_TYPE_COMPARE] = "compare",
4010 : [SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE] = "compare_and_write",
4011 : [SPDK_BDEV_IO_TYPE_ABORT] = "abort",
4012 : [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = "seek_hole",
4013 : [SPDK_BDEV_IO_TYPE_SEEK_DATA] = "seek_data",
4014 : [SPDK_BDEV_IO_TYPE_COPY] = "copy",
4015 : [SPDK_BDEV_IO_TYPE_NVME_IOV_MD] = "nvme_iov_md",
4016 : };
4017 :
4018 : const char *
4019 0 : spdk_bdev_get_io_type_name(enum spdk_bdev_io_type io_type)
4020 : {
4021 0 : if (io_type <= SPDK_BDEV_IO_TYPE_INVALID || io_type >= SPDK_BDEV_NUM_IO_TYPES) {
4022 0 : return NULL;
4023 : }
4024 :
4025 0 : return g_io_type_strings[io_type];
4026 0 : }
4027 :
4028 : int
4029 0 : spdk_bdev_get_io_type(const char *io_type_string)
4030 : {
4031 : int i;
4032 :
4033 0 : for (i = SPDK_BDEV_IO_TYPE_READ; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
4034 0 : if (!strcmp(io_type_string, g_io_type_strings[i])) {
4035 0 : return i;
4036 : }
4037 0 : }
4038 :
4039 0 : return -1;
4040 0 : }
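/* Example (illustrative): spdk_bdev_get_io_type("unmap") returns
 * SPDK_BDEV_IO_TYPE_UNMAP, and spdk_bdev_get_io_type_name() maps that value
 * back to "unmap"; an unrecognized string yields -1, and an out-of-range
 * type yields NULL.
 */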
4041 :
4042 : uint64_t
4043 0 : spdk_bdev_io_get_submit_tsc(struct spdk_bdev_io *bdev_io)
4044 : {
4045 0 : return bdev_io->internal.submit_tsc;
4046 : }
4047 :
4048 : int
4049 0 : spdk_bdev_dump_info_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
4050 : {
4051 0 : if (bdev->fn_table->dump_info_json) {
4052 0 : return bdev->fn_table->dump_info_json(bdev->ctxt, w);
4053 : }
4054 :
4055 0 : return 0;
4056 0 : }
4057 :
4058 : static void
4059 10 : bdev_qos_update_max_quota_per_timeslice(struct spdk_bdev_qos *qos)
4060 : {
4061 10 : uint32_t max_per_timeslice = 0;
4062 : int i;
4063 :
4064 50 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4065 40 : if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
4066 15 : qos->rate_limits[i].max_per_timeslice = 0;
4067 15 : continue;
4068 : }
4069 :
4070 50 : max_per_timeslice = qos->rate_limits[i].limit *
4071 25 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC / SPDK_SEC_TO_USEC;
4072 :
4073 25 : qos->rate_limits[i].max_per_timeslice = spdk_max(max_per_timeslice,
4074 : qos->rate_limits[i].min_per_timeslice);
4075 :
4076 50 : __atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
4077 25 : qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELEASE);
4078 25 : }
4079 :
4080 10 : bdev_qos_set_ops(qos);
4081 10 : }
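/* Worked example (illustrative): with an rw_ios_per_sec limit of 10000 and a
 * 1000 us timeslice, max_per_timeslice = 10000 * 1000 / 1000000 = 10 I/Os per
 * timeslice. A byte limit of 1048576 bytes/s (1 MiB/s) yields
 * 1048576 * 1000 / 1000000 = 1048 bytes (integer division), which the
 * spdk_max() above clamps to at least min_per_timeslice.
 */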
4082 :
4083 : static void
4084 4 : bdev_channel_submit_qos_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
4085 : struct spdk_io_channel *io_ch, void *ctx)
4086 : {
4087 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
4088 : int status;
4089 :
4090 4 : bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
4091 :
4092 : /* if all IOs were sent then continue the iteration, otherwise - stop it */
4093 : 	/* If all IOs were sent, then continue the iteration; otherwise, stop it. */
4094 : 	/* TODO: round-robin across channels */
4095 :
4096 4 : spdk_bdev_for_each_channel_continue(i, status);
4097 4 : }
4098 :
4099 :
4100 : static void
4101 2 : bdev_channel_submit_qos_io_done(struct spdk_bdev *bdev, void *ctx, int status)
4102 : {
4103 :
4104 2 : }
4105 :
4106 : static int
4107 3 : bdev_channel_poll_qos(void *arg)
4108 : {
4109 3 : struct spdk_bdev *bdev = arg;
4110 3 : struct spdk_bdev_qos *qos = bdev->internal.qos;
4111 3 : uint64_t now = spdk_get_ticks();
4112 : int i;
4113 : int64_t remaining_last_timeslice;
4114 :
4115 3 : if (spdk_unlikely(qos->thread == NULL)) {
4116 : 		/* The old QoS was unbound for removal and the new QoS is not enabled yet. */
4117 1 : return SPDK_POLLER_IDLE;
4118 : }
4119 :
4120 2 : if (now < (qos->last_timeslice + qos->timeslice_size)) {
4121 : /* We received our callback earlier than expected - return
4122 : * immediately and wait to do accounting until at least one
4123 : * timeslice has actually expired. This should never happen
4124 : * with a well-behaved timer implementation.
4125 : */
4126 0 : return SPDK_POLLER_IDLE;
4127 : }
4128 :
4129 : /* Reset for next round of rate limiting */
4130 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4131 : /* We may have allowed the IOs or bytes to slightly overrun in the last
4132 : * timeslice. remaining_this_timeslice is signed, so if it's negative
4133 : * here, we'll account for the overrun so that the next timeslice will
4134 : * be appropriately reduced.
4135 : */
4136 8 : remaining_last_timeslice = __atomic_exchange_n(&qos->rate_limits[i].remaining_this_timeslice,
4137 : 0, __ATOMIC_RELAXED);
4138 8 : if (remaining_last_timeslice < 0) {
4139 : /* There could be a race condition here as both bdev_qos_rw_queue_io() and bdev_channel_poll_qos()
4140 : * potentially use 2 atomic ops each, so they can intertwine.
4141 : * This race can potentially cause the limits to be a little fuzzy but won't cause any real damage.
4142 : */
4143 0 : __atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
4144 0 : remaining_last_timeslice, __ATOMIC_RELAXED);
4145 0 : }
4146 8 : }
4147 :
4148 4 : while (now >= (qos->last_timeslice + qos->timeslice_size)) {
4149 2 : qos->last_timeslice += qos->timeslice_size;
4150 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4151 16 : __atomic_add_fetch(&qos->rate_limits[i].remaining_this_timeslice,
4152 8 : qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELAXED);
4153 8 : }
4154 : }
4155 :
4156 2 : spdk_bdev_for_each_channel(bdev, bdev_channel_submit_qos_io, qos,
4157 : bdev_channel_submit_qos_io_done);
4158 :
4159 2 : return SPDK_POLLER_BUSY;
4160 3 : }
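/* Illustrative timeline: if two full timeslices elapsed since the poller last
 * ran, the while loop above advances last_timeslice twice and credits
 * 2 * max_per_timeslice to each rate limit; a negative balance carried over
 * from the previous slice (an overrun) is preserved by the atomic exchange
 * above and simply reduces that credit.
 */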
4161 :
4162 : static void
4163 75 : bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
4164 : {
4165 : struct spdk_bdev_shared_resource *shared_resource;
4166 : struct lba_range *range;
4167 :
4168 75 : bdev_free_io_stat(ch->stat);
4169 : #ifdef SPDK_CONFIG_VTUNE
4170 : bdev_free_io_stat(ch->prev_stat);
4171 : #endif
4172 :
4173 75 : while (!TAILQ_EMPTY(&ch->locked_ranges)) {
4174 0 : range = TAILQ_FIRST(&ch->locked_ranges);
4175 0 : TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
4176 0 : free(range);
4177 : }
4178 :
4179 75 : spdk_put_io_channel(ch->channel);
4180 75 : spdk_put_io_channel(ch->accel_channel);
4181 :
4182 75 : shared_resource = ch->shared_resource;
4183 :
4184 75 : assert(TAILQ_EMPTY(&ch->io_locked));
4185 75 : assert(TAILQ_EMPTY(&ch->io_submitted));
4186 75 : assert(TAILQ_EMPTY(&ch->io_accel_exec));
4187 75 : assert(TAILQ_EMPTY(&ch->io_memory_domain));
4188 75 : assert(ch->io_outstanding == 0);
4189 75 : assert(shared_resource->ref > 0);
4190 75 : shared_resource->ref--;
4191 75 : if (shared_resource->ref == 0) {
4192 74 : assert(shared_resource->io_outstanding == 0);
4193 74 : TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
4194 74 : spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
4195 74 : spdk_poller_unregister(&shared_resource->nomem_poller);
4196 74 : free(shared_resource);
4197 74 : }
4198 75 : }
4199 :
4200 : static void
4201 84 : bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
4202 : {
4203 84 : struct spdk_bdev_qos *qos = bdev->internal.qos;
4204 : int i;
4205 :
4206 84 : assert(spdk_spin_held(&bdev->internal.spinlock));
4207 :
4208 : 	/* Rate limiting is enabled on this bdev */
4209 84 : if (qos) {
4210 17 : if (qos->ch == NULL) {
4211 : struct spdk_io_channel *io_ch;
4212 :
4213 9 : SPDK_DEBUGLOG(bdev, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
4214 : bdev->name, spdk_get_thread());
4215 :
4216 : /* No qos channel has been selected, so set one up */
4217 :
4218 : /* Take another reference to ch */
4219 9 : io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
4220 9 : assert(io_ch != NULL);
4221 9 : qos->ch = ch;
4222 :
4223 9 : qos->thread = spdk_io_channel_get_thread(io_ch);
4224 :
4225 45 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4226 36 : if (bdev_qos_is_iops_rate_limit(i) == true) {
4227 9 : qos->rate_limits[i].min_per_timeslice =
4228 : SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE;
4229 9 : } else {
4230 27 : qos->rate_limits[i].min_per_timeslice =
4231 : SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE;
4232 : }
4233 :
4234 36 : if (qos->rate_limits[i].limit == 0) {
4235 2 : qos->rate_limits[i].limit = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
4236 2 : }
4237 36 : }
4238 9 : bdev_qos_update_max_quota_per_timeslice(qos);
4239 9 : qos->timeslice_size =
4240 9 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
4241 9 : qos->last_timeslice = spdk_get_ticks();
4242 9 : qos->poller = SPDK_POLLER_REGISTER(bdev_channel_poll_qos,
4243 : bdev,
4244 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
4245 9 : }
4246 :
4247 17 : ch->flags |= BDEV_CH_QOS_ENABLED;
4248 17 : }
4249 84 : }
4250 :
4251 : struct poll_timeout_ctx {
4252 : struct spdk_bdev_desc *desc;
4253 : uint64_t timeout_in_sec;
4254 : spdk_bdev_io_timeout_cb cb_fn;
4255 : void *cb_arg;
4256 : };
4257 :
4258 : static void
4259 278 : bdev_desc_free(struct spdk_bdev_desc *desc)
4260 : {
4261 278 : spdk_spin_destroy(&desc->spinlock);
4262 278 : free(desc->media_events_buffer);
4263 278 : free(desc);
4264 278 : }
4265 :
4266 : static void
4267 8 : bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
4268 : {
4269 8 : struct poll_timeout_ctx *ctx = _ctx;
4270 8 : struct spdk_bdev_desc *desc = ctx->desc;
4271 :
4272 8 : free(ctx);
4273 :
4274 8 : spdk_spin_lock(&desc->spinlock);
4275 8 : desc->refs--;
4276 8 : if (desc->closed == true && desc->refs == 0) {
4277 1 : spdk_spin_unlock(&desc->spinlock);
4278 1 : bdev_desc_free(desc);
4279 1 : return;
4280 : }
4281 7 : spdk_spin_unlock(&desc->spinlock);
4282 8 : }
4283 :
4284 : static void
4285 13 : bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
4286 : struct spdk_io_channel *io_ch, void *_ctx)
4287 : {
4288 13 : struct poll_timeout_ctx *ctx = _ctx;
4289 13 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
4290 13 : struct spdk_bdev_desc *desc = ctx->desc;
4291 : struct spdk_bdev_io *bdev_io;
4292 : uint64_t now;
4293 :
4294 13 : spdk_spin_lock(&desc->spinlock);
4295 13 : if (desc->closed == true) {
4296 1 : spdk_spin_unlock(&desc->spinlock);
4297 1 : spdk_bdev_for_each_channel_continue(i, -1);
4298 1 : return;
4299 : }
4300 12 : spdk_spin_unlock(&desc->spinlock);
4301 :
4302 12 : now = spdk_get_ticks();
4303 22 : TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
4304 : /* Exclude any I/O that are generated via splitting. */
4305 15 : if (bdev_io->internal.cb == bdev_io_split_done) {
4306 3 : continue;
4307 : }
4308 :
4309 : /* Once we find an I/O that has not timed out, we can immediately
4310 : * exit the loop.
4311 : */
4312 24 : if (now < (bdev_io->internal.submit_tsc +
4313 12 : ctx->timeout_in_sec * spdk_get_ticks_hz())) {
4314 5 : goto end;
4315 : }
4316 :
4317 7 : if (bdev_io->internal.desc == desc) {
4318 7 : ctx->cb_fn(ctx->cb_arg, bdev_io);
4319 7 : }
4320 14 : }
4321 :
4322 : end:
4323 12 : spdk_bdev_for_each_channel_continue(i, 0);
4324 13 : }
4325 :
4326 : static int
4327 8 : bdev_poll_timeout_io(void *arg)
4328 : {
4329 8 : struct spdk_bdev_desc *desc = arg;
4330 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4331 : struct poll_timeout_ctx *ctx;
4332 :
4333 8 : ctx = calloc(1, sizeof(struct poll_timeout_ctx));
4334 8 : if (!ctx) {
4335 0 : SPDK_ERRLOG("failed to allocate memory\n");
4336 0 : return SPDK_POLLER_BUSY;
4337 : }
4338 8 : ctx->desc = desc;
4339 8 : ctx->cb_arg = desc->cb_arg;
4340 8 : ctx->cb_fn = desc->cb_fn;
4341 8 : ctx->timeout_in_sec = desc->timeout_in_sec;
4342 :
4343 : /* Take a ref on the descriptor in case it gets closed while we are checking
4344 : * all of the channels.
4345 : */
4346 8 : spdk_spin_lock(&desc->spinlock);
4347 8 : desc->refs++;
4348 8 : spdk_spin_unlock(&desc->spinlock);
4349 :
4350 8 : spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx,
4351 : bdev_channel_poll_timeout_io_done);
4352 :
4353 8 : return SPDK_POLLER_BUSY;
4354 8 : }
4355 :
4356 : int
4357 5 : spdk_bdev_set_timeout(struct spdk_bdev_desc *desc, uint64_t timeout_in_sec,
4358 : spdk_bdev_io_timeout_cb cb_fn, void *cb_arg)
4359 : {
4360 5 : assert(desc->thread == spdk_get_thread());
4361 :
4362 5 : spdk_poller_unregister(&desc->io_timeout_poller);
4363 :
4364 5 : if (timeout_in_sec) {
4365 4 : assert(cb_fn != NULL);
4366 4 : desc->io_timeout_poller = SPDK_POLLER_REGISTER(bdev_poll_timeout_io,
4367 : desc,
4368 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * SPDK_SEC_TO_USEC /
4369 : 1000);
4370 4 : if (desc->io_timeout_poller == NULL) {
4371 0 : 			SPDK_ERRLOG("cannot register the desc timeout IO poller\n");
4372 0 : return -1;
4373 : }
4374 4 : }
4375 :
4376 5 : desc->cb_fn = cb_fn;
4377 5 : desc->cb_arg = cb_arg;
4378 5 : desc->timeout_in_sec = timeout_in_sec;
4379 :
4380 5 : return 0;
4381 5 : }
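/* Usage sketch (illustrative; the callback and context names are
 * hypothetical):
 *
 *	static void
 *	my_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
 *	{
 *		SPDK_ERRLOG("I/O to bdev %s exceeded its deadline\n",
 *			    bdev_io->bdev->name);
 *	}
 *
 *	// On the thread that opened desc: report any I/O older than 30 s.
 *	// Passing timeout_in_sec = 0 simply unregisters the poller.
 *	spdk_bdev_set_timeout(desc, 30, my_io_timeout_cb, my_ctx);
 */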
4382 :
4383 : static int
4384 77 : bdev_channel_create(void *io_device, void *ctx_buf)
4385 : {
4386 77 : struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
4387 77 : struct spdk_bdev_channel *ch = ctx_buf;
4388 : struct spdk_io_channel *mgmt_io_ch;
4389 : struct spdk_bdev_mgmt_channel *mgmt_ch;
4390 : struct spdk_bdev_shared_resource *shared_resource;
4391 : struct lba_range *range;
4392 :
4393 77 : ch->bdev = bdev;
4394 77 : ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
4395 77 : if (!ch->channel) {
4396 2 : return -1;
4397 : }
4398 :
4399 75 : ch->accel_channel = spdk_accel_get_io_channel();
4400 75 : if (!ch->accel_channel) {
4401 0 : spdk_put_io_channel(ch->channel);
4402 0 : return -1;
4403 : }
4404 :
4405 75 : spdk_trace_record(TRACE_BDEV_IOCH_CREATE, bdev->internal.trace_id, 0, 0,
4406 : spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4407 :
4408 75 : assert(ch->histogram == NULL);
4409 75 : if (bdev->internal.histogram_enabled) {
4410 0 : ch->histogram = spdk_histogram_data_alloc();
4411 0 : if (ch->histogram == NULL) {
4412 0 : SPDK_ERRLOG("Could not allocate histogram\n");
4413 0 : }
4414 0 : }
4415 :
4416 75 : mgmt_io_ch = spdk_get_io_channel(&g_bdev_mgr);
4417 75 : if (!mgmt_io_ch) {
4418 0 : spdk_put_io_channel(ch->channel);
4419 0 : spdk_put_io_channel(ch->accel_channel);
4420 0 : return -1;
4421 : }
4422 :
4423 75 : mgmt_ch = __io_ch_to_bdev_mgmt_ch(mgmt_io_ch);
4424 77 : TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
4425 3 : if (shared_resource->shared_ch == ch->channel) {
4426 1 : spdk_put_io_channel(mgmt_io_ch);
4427 1 : shared_resource->ref++;
4428 1 : break;
4429 : }
4430 2 : }
4431 :
4432 75 : if (shared_resource == NULL) {
4433 74 : shared_resource = calloc(1, sizeof(*shared_resource));
4434 74 : if (shared_resource == NULL) {
4435 0 : spdk_put_io_channel(ch->channel);
4436 0 : spdk_put_io_channel(ch->accel_channel);
4437 0 : spdk_put_io_channel(mgmt_io_ch);
4438 0 : return -1;
4439 : }
4440 :
4441 74 : shared_resource->mgmt_ch = mgmt_ch;
4442 74 : shared_resource->io_outstanding = 0;
4443 74 : TAILQ_INIT(&shared_resource->nomem_io);
4444 74 : shared_resource->nomem_threshold = 0;
4445 74 : shared_resource->shared_ch = ch->channel;
4446 74 : shared_resource->ref = 1;
4447 74 : TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
4448 74 : }
4449 :
4450 75 : ch->io_outstanding = 0;
4451 75 : TAILQ_INIT(&ch->locked_ranges);
4452 75 : TAILQ_INIT(&ch->qos_queued_io);
4453 75 : ch->flags = 0;
4454 75 : ch->trace_id = bdev->internal.trace_id;
4455 75 : ch->shared_resource = shared_resource;
4456 :
4457 75 : TAILQ_INIT(&ch->io_submitted);
4458 75 : TAILQ_INIT(&ch->io_locked);
4459 75 : TAILQ_INIT(&ch->io_accel_exec);
4460 75 : TAILQ_INIT(&ch->io_memory_domain);
4461 :
4462 75 : ch->stat = bdev_alloc_io_stat(false);
4463 75 : if (ch->stat == NULL) {
4464 0 : bdev_channel_destroy_resource(ch);
4465 0 : return -1;
4466 : }
4467 :
4468 75 : ch->stat->ticks_rate = spdk_get_ticks_hz();
4469 :
4470 : #ifdef SPDK_CONFIG_VTUNE
4471 : {
4472 : char *name;
4473 : __itt_init_ittlib(NULL, 0);
4474 : name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
4475 : if (!name) {
4476 : bdev_channel_destroy_resource(ch);
4477 : return -1;
4478 : }
4479 : ch->handle = __itt_string_handle_create(name);
4480 : free(name);
4481 : ch->start_tsc = spdk_get_ticks();
4482 : ch->interval_tsc = spdk_get_ticks_hz() / 100;
4483 : ch->prev_stat = bdev_alloc_io_stat(false);
4484 : if (ch->prev_stat == NULL) {
4485 : bdev_channel_destroy_resource(ch);
4486 : return -1;
4487 : }
4488 : }
4489 : #endif
4490 :
4491 75 : spdk_spin_lock(&bdev->internal.spinlock);
4492 75 : bdev_enable_qos(bdev, ch);
4493 :
4494 76 : TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
4495 : struct lba_range *new_range;
4496 :
4497 1 : new_range = calloc(1, sizeof(*new_range));
4498 1 : if (new_range == NULL) {
4499 0 : spdk_spin_unlock(&bdev->internal.spinlock);
4500 0 : bdev_channel_destroy_resource(ch);
4501 0 : return -1;
4502 : }
4503 1 : new_range->length = range->length;
4504 1 : new_range->offset = range->offset;
4505 1 : new_range->locked_ctx = range->locked_ctx;
4506 1 : TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
4507 1 : }
4508 :
4509 75 : spdk_spin_unlock(&bdev->internal.spinlock);
4510 :
4511 75 : return 0;
4512 77 : }
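/* Example (illustrative): if two bdev channels on the same thread are backed
 * by the same underlying module channel, the second bdev_channel_create()
 * finds the existing shared_resource via the shared_ch comparison above and
 * bumps its ref to 2, so the nomem_io queue and outstanding counters are
 * shared rather than duplicated.
 */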
4513 :
4514 : static int
4515 0 : bdev_abort_all_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
4516 : void *cb_ctx)
4517 : {
4518 0 : struct spdk_bdev_channel *bdev_ch = cb_ctx;
4519 : struct spdk_bdev_io *bdev_io;
4520 : uint64_t buf_len;
4521 :
4522 0 : bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4523 0 : if (bdev_io->internal.ch == bdev_ch) {
4524 0 : buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf.len);
4525 0 : spdk_iobuf_entry_abort(ch, entry, buf_len);
4526 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4527 0 : }
4528 :
4529 0 : return 0;
4530 : }
4531 :
4532 : /*
4533 : * Abort I/O that are waiting on a data buffer.
4534 : */
4535 : static void
4536 98 : bdev_abort_all_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_channel *ch)
4537 : {
4538 98 : spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, bdev_abort_all_buf_io_cb, ch);
4539 98 : }
4540 :
4541 : /*
4542 : * Abort I/O that are queued waiting for submission. These types of I/O are
4543 : * linked using the spdk_bdev_io link TAILQ_ENTRY.
4544 : */
4545 : static void
4546 117 : bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
4547 : {
4548 : struct spdk_bdev_io *bdev_io, *tmp;
4549 :
4550 156 : TAILQ_FOREACH_SAFE(bdev_io, queue, internal.link, tmp) {
4551 39 : if (bdev_io->internal.ch == ch) {
4552 39 : TAILQ_REMOVE(queue, bdev_io, internal.link);
4553 : /*
4554 : * spdk_bdev_io_complete() assumes that the completed I/O had
4555 : * been submitted to the bdev module. Since in this case it
4556 : * hadn't, bump io_outstanding to account for the decrement
4557 : * that spdk_bdev_io_complete() will do.
4558 : */
4559 39 : if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
4560 39 : bdev_io_increment_outstanding(ch, ch->shared_resource);
4561 39 : }
4562 39 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4563 39 : }
4564 39 : }
4565 117 : }
4566 :
4567 : static bool
4568 18 : bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
4569 : {
4570 : struct spdk_bdev_io *bdev_io;
4571 :
4572 18 : TAILQ_FOREACH(bdev_io, queue, internal.link) {
4573 0 : if (bdev_io == bio_to_abort) {
4574 0 : TAILQ_REMOVE(queue, bio_to_abort, internal.link);
4575 0 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4576 0 : return true;
4577 : }
4578 0 : }
4579 :
4580 18 : return false;
4581 18 : }
4582 :
4583 : static int
4584 0 : bdev_abort_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry, void *cb_ctx)
4585 : {
4586 0 : struct spdk_bdev_io *bdev_io, *bio_to_abort = cb_ctx;
4587 : uint64_t buf_len;
4588 :
4589 0 : bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4590 0 : if (bdev_io == bio_to_abort) {
4591 0 : buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf.len);
4592 0 : spdk_iobuf_entry_abort(ch, entry, buf_len);
4593 0 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4594 0 : return 1;
4595 : }
4596 :
4597 0 : return 0;
4598 0 : }
4599 :
4600 : static bool
4601 16 : bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_io *bio_to_abort)
4602 : {
4603 : int rc;
4604 :
4605 16 : rc = spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, bdev_abort_buf_io_cb, bio_to_abort);
4606 16 : return rc == 1;
4607 : }
4608 :
4609 : static void
4610 7 : bdev_qos_channel_destroy(void *cb_arg)
4611 : {
4612 7 : struct spdk_bdev_qos *qos = cb_arg;
4613 :
4614 7 : spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
4615 7 : spdk_poller_unregister(&qos->poller);
4616 :
4617 7 : SPDK_DEBUGLOG(bdev, "Free QoS %p.\n", qos);
4618 :
4619 7 : free(qos);
4620 7 : }
4621 :
4622 : static int
4623 7 : bdev_qos_destroy(struct spdk_bdev *bdev)
4624 : {
4625 : int i;
4626 :
4627 : /*
4628 : * Cleanly shutting down the QoS poller is tricky, because
4629 : * during the asynchronous operation the user could open
4630 : * a new descriptor and create a new channel, spawning
4631 : * a new QoS poller.
4632 : *
4633 : * The strategy is to create a new QoS structure here and swap it
4634 : * in. The shutdown path then continues to refer to the old one
4635 : * until it completes and then releases it.
4636 : */
4637 : struct spdk_bdev_qos *new_qos, *old_qos;
4638 :
4639 7 : old_qos = bdev->internal.qos;
4640 :
4641 7 : new_qos = calloc(1, sizeof(*new_qos));
4642 7 : if (!new_qos) {
4643 0 : SPDK_ERRLOG("Unable to allocate memory to shut down QoS.\n");
4644 0 : return -ENOMEM;
4645 : }
4646 :
4647 : /* Copy the old QoS data into the newly allocated structure */
4648 7 : memcpy(new_qos, old_qos, sizeof(*new_qos));
4649 :
4650 : /* Zero out the key parts of the QoS structure */
4651 7 : new_qos->ch = NULL;
4652 7 : new_qos->thread = NULL;
4653 7 : new_qos->poller = NULL;
4654 : /*
4655 : * The limit member of spdk_bdev_qos_limit structure is not zeroed.
4656 : * It will be used later for the new QoS structure.
4657 : */
4658 35 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4659 28 : new_qos->rate_limits[i].remaining_this_timeslice = 0;
4660 28 : new_qos->rate_limits[i].min_per_timeslice = 0;
4661 28 : new_qos->rate_limits[i].max_per_timeslice = 0;
4662 28 : }
4663 :
4664 7 : bdev->internal.qos = new_qos;
4665 :
4666 7 : if (old_qos->thread == NULL) {
4667 0 : free(old_qos);
4668 0 : } else {
4669 7 : spdk_thread_send_msg(old_qos->thread, bdev_qos_channel_destroy, old_qos);
4670 : }
4671 :
4672 : /* It is safe to continue with destroying the bdev even though the QoS channel hasn't
4673 : * been destroyed yet. The destruction path will end up waiting for the final
4674 : * channel to be put before it releases resources. */
4675 :
4676 7 : return 0;
4677 7 : }
4678 :
4679 : void
4680 79 : spdk_bdev_add_io_stat(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat *add)
4681 : {
4682 79 : total->bytes_read += add->bytes_read;
4683 79 : total->num_read_ops += add->num_read_ops;
4684 79 : total->bytes_written += add->bytes_written;
4685 79 : total->num_write_ops += add->num_write_ops;
4686 79 : total->bytes_unmapped += add->bytes_unmapped;
4687 79 : total->num_unmap_ops += add->num_unmap_ops;
4688 79 : total->bytes_copied += add->bytes_copied;
4689 79 : total->num_copy_ops += add->num_copy_ops;
4690 79 : total->read_latency_ticks += add->read_latency_ticks;
4691 79 : total->write_latency_ticks += add->write_latency_ticks;
4692 79 : total->unmap_latency_ticks += add->unmap_latency_ticks;
4693 79 : total->copy_latency_ticks += add->copy_latency_ticks;
4694 79 : if (total->max_read_latency_ticks < add->max_read_latency_ticks) {
4695 7 : total->max_read_latency_ticks = add->max_read_latency_ticks;
4696 7 : }
4697 79 : if (total->min_read_latency_ticks > add->min_read_latency_ticks) {
4698 39 : total->min_read_latency_ticks = add->min_read_latency_ticks;
4699 39 : }
4700 79 : if (total->max_write_latency_ticks < add->max_write_latency_ticks) {
4701 4 : total->max_write_latency_ticks = add->max_write_latency_ticks;
4702 4 : }
4703 79 : if (total->min_write_latency_ticks > add->min_write_latency_ticks) {
4704 24 : total->min_write_latency_ticks = add->min_write_latency_ticks;
4705 24 : }
4706 79 : if (total->max_unmap_latency_ticks < add->max_unmap_latency_ticks) {
4707 0 : total->max_unmap_latency_ticks = add->max_unmap_latency_ticks;
4708 0 : }
4709 79 : if (total->min_unmap_latency_ticks > add->min_unmap_latency_ticks) {
4710 3 : total->min_unmap_latency_ticks = add->min_unmap_latency_ticks;
4711 3 : }
4712 79 : if (total->max_copy_latency_ticks < add->max_copy_latency_ticks) {
4713 0 : total->max_copy_latency_ticks = add->max_copy_latency_ticks;
4714 0 : }
4715 79 : if (total->min_copy_latency_ticks > add->min_copy_latency_ticks) {
4716 4 : total->min_copy_latency_ticks = add->min_copy_latency_ticks;
4717 4 : }
4718 79 : }
4719 :
4720 : static void
4721 5 : bdev_get_io_stat(struct spdk_bdev_io_stat *to_stat, struct spdk_bdev_io_stat *from_stat)
4722 : {
4723 5 : memcpy(to_stat, from_stat, offsetof(struct spdk_bdev_io_stat, io_error));
4724 :
4725 5 : if (to_stat->io_error != NULL && from_stat->io_error != NULL) {
4726 0 : memcpy(to_stat->io_error, from_stat->io_error,
4727 : sizeof(struct spdk_bdev_io_error_stat));
4728 0 : }
4729 5 : }
4730 :
4731 : void
4732 216 : spdk_bdev_reset_io_stat(struct spdk_bdev_io_stat *stat, enum spdk_bdev_reset_stat_mode mode)
4733 : {
4734 216 : if (mode == SPDK_BDEV_RESET_STAT_NONE) {
4735 5 : return;
4736 : }
4737 :
4738 211 : stat->max_read_latency_ticks = 0;
4739 211 : stat->min_read_latency_ticks = UINT64_MAX;
4740 211 : stat->max_write_latency_ticks = 0;
4741 211 : stat->min_write_latency_ticks = UINT64_MAX;
4742 211 : stat->max_unmap_latency_ticks = 0;
4743 211 : stat->min_unmap_latency_ticks = UINT64_MAX;
4744 211 : stat->max_copy_latency_ticks = 0;
4745 211 : stat->min_copy_latency_ticks = UINT64_MAX;
4746 :
4747 211 : if (mode != SPDK_BDEV_RESET_STAT_ALL) {
4748 2 : return;
4749 : }
4750 :
4751 209 : stat->bytes_read = 0;
4752 209 : stat->num_read_ops = 0;
4753 209 : stat->bytes_written = 0;
4754 209 : stat->num_write_ops = 0;
4755 209 : stat->bytes_unmapped = 0;
4756 209 : stat->num_unmap_ops = 0;
4757 209 : stat->bytes_copied = 0;
4758 209 : stat->num_copy_ops = 0;
4759 209 : stat->read_latency_ticks = 0;
4760 209 : stat->write_latency_ticks = 0;
4761 209 : stat->unmap_latency_ticks = 0;
4762 209 : stat->copy_latency_ticks = 0;
4763 :
4764 209 : if (stat->io_error != NULL) {
4765 133 : memset(stat->io_error, 0, sizeof(struct spdk_bdev_io_error_stat));
4766 133 : }
4767 216 : }
4768 :
4769 : struct spdk_bdev_io_stat *
4770 207 : bdev_alloc_io_stat(bool io_error_stat)
4771 : {
4772 : struct spdk_bdev_io_stat *stat;
4773 :
4774 207 : stat = malloc(sizeof(struct spdk_bdev_io_stat));
4775 207 : if (stat == NULL) {
4776 0 : return NULL;
4777 : }
4778 :
4779 207 : if (io_error_stat) {
4780 132 : stat->io_error = malloc(sizeof(struct spdk_bdev_io_error_stat));
4781 132 : if (stat->io_error == NULL) {
4782 0 : free(stat);
4783 0 : return NULL;
4784 : }
4785 132 : } else {
4786 75 : stat->io_error = NULL;
4787 : }
4788 :
4789 207 : spdk_bdev_reset_io_stat(stat, SPDK_BDEV_RESET_STAT_ALL);
4790 :
4791 207 : return stat;
4792 207 : }
4793 :
4794 : void
4795 207 : bdev_free_io_stat(struct spdk_bdev_io_stat *stat)
4796 : {
4797 207 : if (stat != NULL) {
4798 207 : free(stat->io_error);
4799 207 : free(stat);
4800 207 : }
4801 207 : }
4802 :
4803 : void
4804 0 : spdk_bdev_dump_io_stat_json(struct spdk_bdev_io_stat *stat, struct spdk_json_write_ctx *w)
4805 : {
4806 : int i;
4807 :
4808 0 : spdk_json_write_named_uint64(w, "bytes_read", stat->bytes_read);
4809 0 : spdk_json_write_named_uint64(w, "num_read_ops", stat->num_read_ops);
4810 0 : spdk_json_write_named_uint64(w, "bytes_written", stat->bytes_written);
4811 0 : spdk_json_write_named_uint64(w, "num_write_ops", stat->num_write_ops);
4812 0 : spdk_json_write_named_uint64(w, "bytes_unmapped", stat->bytes_unmapped);
4813 0 : spdk_json_write_named_uint64(w, "num_unmap_ops", stat->num_unmap_ops);
4814 0 : spdk_json_write_named_uint64(w, "bytes_copied", stat->bytes_copied);
4815 0 : spdk_json_write_named_uint64(w, "num_copy_ops", stat->num_copy_ops);
4816 0 : spdk_json_write_named_uint64(w, "read_latency_ticks", stat->read_latency_ticks);
4817 0 : spdk_json_write_named_uint64(w, "max_read_latency_ticks", stat->max_read_latency_ticks);
4818 0 : spdk_json_write_named_uint64(w, "min_read_latency_ticks",
4819 0 : stat->min_read_latency_ticks != UINT64_MAX ?
4820 0 : stat->min_read_latency_ticks : 0);
4821 0 : spdk_json_write_named_uint64(w, "write_latency_ticks", stat->write_latency_ticks);
4822 0 : spdk_json_write_named_uint64(w, "max_write_latency_ticks", stat->max_write_latency_ticks);
4823 0 : spdk_json_write_named_uint64(w, "min_write_latency_ticks",
4824 0 : stat->min_write_latency_ticks != UINT64_MAX ?
4825 0 : stat->min_write_latency_ticks : 0);
4826 0 : spdk_json_write_named_uint64(w, "unmap_latency_ticks", stat->unmap_latency_ticks);
4827 0 : spdk_json_write_named_uint64(w, "max_unmap_latency_ticks", stat->max_unmap_latency_ticks);
4828 0 : spdk_json_write_named_uint64(w, "min_unmap_latency_ticks",
4829 0 : stat->min_unmap_latency_ticks != UINT64_MAX ?
4830 0 : stat->min_unmap_latency_ticks : 0);
4831 0 : spdk_json_write_named_uint64(w, "copy_latency_ticks", stat->copy_latency_ticks);
4832 0 : spdk_json_write_named_uint64(w, "max_copy_latency_ticks", stat->max_copy_latency_ticks);
4833 0 : spdk_json_write_named_uint64(w, "min_copy_latency_ticks",
4834 0 : stat->min_copy_latency_ticks != UINT64_MAX ?
4835 0 : stat->min_copy_latency_ticks : 0);
4836 :
4837 0 : if (stat->io_error != NULL) {
4838 0 : spdk_json_write_named_object_begin(w, "io_error");
4839 0 : for (i = 0; i < -SPDK_MIN_BDEV_IO_STATUS; i++) {
4840 0 : if (stat->io_error->error_status[i] != 0) {
4841 0 : spdk_json_write_named_uint32(w, bdev_io_status_get_string(-(i + 1)),
4842 0 : stat->io_error->error_status[i]);
4843 0 : }
4844 0 : }
4845 0 : spdk_json_write_object_end(w);
4846 0 : }
4847 0 : }
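 :
 : /*
 :  * Shape of the emitted JSON (illustrative summary): plain counters such as
 :  * "bytes_read" and "num_read_ops" come first; min latencies still holding
 :  * the UINT64_MAX reset sentinel are reported as 0; and any non-zero
 :  * per-status error counters are written under an "io_error" object keyed by
 :  * the status name.
 :  */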
4848 :
4849 : static void
4850 79 : bdev_channel_abort_queued_ios(struct spdk_bdev_channel *ch)
4851 : {
4852 79 : struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
4853 79 : struct spdk_bdev_mgmt_channel *mgmt_ch = shared_resource->mgmt_ch;
4854 :
4855 79 : bdev_abort_all_queued_io(&shared_resource->nomem_io, ch);
4856 79 : bdev_abort_all_buf_io(mgmt_ch, ch);
4857 79 : }
4858 :
4859 : static void
4860 75 : bdev_channel_destroy(void *io_device, void *ctx_buf)
4861 : {
4862 75 : struct spdk_bdev_channel *ch = ctx_buf;
4863 :
4864 75 : SPDK_DEBUGLOG(bdev, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
4865 : spdk_get_thread());
4866 :
4867 75 : spdk_trace_record(TRACE_BDEV_IOCH_DESTROY, ch->bdev->internal.trace_id, 0, 0,
4868 : spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4869 :
4870 : /* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
4871 75 : spdk_spin_lock(&ch->bdev->internal.spinlock);
4872 75 : spdk_bdev_add_io_stat(ch->bdev->internal.stat, ch->stat);
4873 75 : spdk_spin_unlock(&ch->bdev->internal.spinlock);
4874 :
4875 75 : bdev_channel_abort_queued_ios(ch);
4876 :
4877 75 : if (ch->histogram) {
4878 0 : spdk_histogram_data_free(ch->histogram);
4879 0 : }
4880 :
4881 75 : bdev_channel_destroy_resource(ch);
4882 75 : }
4883 :
4884 : /*
4885 : * If the name already exists in the global bdev name tree, RB_INSERT() returns a pointer
4886 : * to it. Hence we do not have to call bdev_get_by_name() when using this function.
4887 : */
4888 : static int
4889 267 : bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const char *name)
4890 : {
4891 : struct spdk_bdev_name *tmp;
4892 :
4893 267 : bdev_name->name = strdup(name);
4894 267 : if (bdev_name->name == NULL) {
4895 0 : SPDK_ERRLOG("Unable to allocate bdev name\n");
4896 0 : return -ENOMEM;
4897 : }
4898 :
4899 267 : bdev_name->bdev = bdev;
4900 :
4901 267 : spdk_spin_lock(&g_bdev_mgr.spinlock);
4902 267 : tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4903 267 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
4904 :
4905 267 : if (tmp != NULL) {
4906 4 : SPDK_ERRLOG("Bdev name %s already exists\n", name);
4907 4 : free(bdev_name->name);
4908 4 : return -EEXIST;
4909 : }
4910 :
4911 263 : return 0;
4912 267 : }
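 :
 : /*
 :  * Minimal sketch of the RB_INSERT() idiom used above (illustrative):
 :  * RB_INSERT() returns NULL on success and the colliding node otherwise, so
 :  * lookup and insert cost a single tree traversal.
 :  *
 :  *   struct spdk_bdev_name *existing;
 :  *
 :  *   existing = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, new_name);
 :  *   if (existing != NULL) {
 :  *           SPDK_ERRLOG("name taken by bdev %s\n", existing->bdev->name);
 :  *   }
 :  */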
4913 :
4914 : static void
4915 263 : bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
4916 : {
4917 263 : RB_REMOVE(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4918 263 : free(bdev_name->name);
4919 263 : }
4920 :
4921 : static void
4922 5 : bdev_name_del(struct spdk_bdev_name *bdev_name)
4923 : {
4924 5 : spdk_spin_lock(&g_bdev_mgr.spinlock);
4925 5 : bdev_name_del_unsafe(bdev_name);
4926 5 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
4927 5 : }
4928 :
4929 : int
4930 138 : spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
4931 : {
4932 : struct spdk_bdev_alias *tmp;
4933 : int ret;
4934 :
4935 138 : if (alias == NULL) {
4936 1 : SPDK_ERRLOG("Empty alias passed\n");
4937 1 : return -EINVAL;
4938 : }
4939 :
4940 137 : tmp = calloc(1, sizeof(*tmp));
4941 137 : if (tmp == NULL) {
4942 0 : SPDK_ERRLOG("Unable to allocate alias\n");
4943 0 : return -ENOMEM;
4944 : }
4945 :
4946 137 : ret = bdev_name_add(&tmp->alias, bdev, alias);
4947 137 : if (ret != 0) {
4948 4 : free(tmp);
4949 4 : return ret;
4950 : }
4951 :
4952 133 : TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
4953 :
4954 133 : return 0;
4955 138 : }
4956 :
4957 : static int
4958 134 : bdev_alias_del(struct spdk_bdev *bdev, const char *alias,
4959 : void (*alias_del_fn)(struct spdk_bdev_name *n))
4960 : {
4961 : struct spdk_bdev_alias *tmp;
4962 :
4963 139 : TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
4964 135 : if (strcmp(alias, tmp->alias.name) == 0) {
4965 130 : TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
4966 130 : alias_del_fn(&tmp->alias);
4967 130 : free(tmp);
4968 130 : return 0;
4969 : }
4970 5 : }
4971 :
4972 4 : return -ENOENT;
4973 134 : }
4974 :
4975 : int
4976 4 : spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
4977 : {
4978 : int rc;
4979 :
4980 4 : rc = bdev_alias_del(bdev, alias, bdev_name_del);
4981 4 : if (rc == -ENOENT) {
4982 2 : SPDK_INFOLOG(bdev, "Alias %s does not exist\n", alias);
4983 2 : }
4984 :
4985 4 : return rc;
4986 : }
4987 :
4988 : void
4989 2 : spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
4990 : {
4991 : struct spdk_bdev_alias *p, *tmp;
4992 :
4993 5 : TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
4994 3 : TAILQ_REMOVE(&bdev->aliases, p, tailq);
4995 3 : bdev_name_del(&p->alias);
4996 3 : free(p);
4997 3 : }
4998 2 : }
4999 :
5000 : struct spdk_io_channel *
5001 77 : spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
5002 : {
5003 77 : return spdk_get_io_channel(__bdev_to_io_dev(spdk_bdev_desc_get_bdev(desc)));
5004 : }
5005 :
5006 : void *
5007 0 : spdk_bdev_get_module_ctx(struct spdk_bdev_desc *desc)
5008 : {
5009 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5010 0 : void *ctx = NULL;
5011 :
5012 0 : if (bdev->fn_table->get_module_ctx) {
5013 0 : ctx = bdev->fn_table->get_module_ctx(bdev->ctxt);
5014 0 : }
5015 :
5016 0 : return ctx;
5017 : }
5018 :
5019 : const char *
5020 0 : spdk_bdev_get_module_name(const struct spdk_bdev *bdev)
5021 : {
5022 0 : return bdev->module->name;
5023 : }
5024 :
5025 : const char *
5026 263 : spdk_bdev_get_name(const struct spdk_bdev *bdev)
5027 : {
5028 263 : return bdev->name;
5029 : }
5030 :
5031 : const char *
5032 0 : spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
5033 : {
5034 0 : return bdev->product_name;
5035 : }
5036 :
5037 : const struct spdk_bdev_aliases_list *
5038 0 : spdk_bdev_get_aliases(const struct spdk_bdev *bdev)
5039 : {
5040 0 : return &bdev->aliases;
5041 : }
5042 :
5043 : uint32_t
5044 5 : spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
5045 : {
5046 5 : return bdev->blocklen;
5047 : }
5048 :
5049 : uint32_t
5050 0 : spdk_bdev_get_write_unit_size(const struct spdk_bdev *bdev)
5051 : {
5052 0 : return bdev->write_unit_size;
5053 : }
5054 :
5055 : uint64_t
5056 0 : spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
5057 : {
5058 0 : return bdev->blockcnt;
5059 : }
5060 :
5061 : const char *
5062 0 : spdk_bdev_get_qos_rpc_type(enum spdk_bdev_qos_rate_limit_type type)
5063 : {
5064 0 : return qos_rpc_type[type];
5065 : }
5066 :
5067 : void
5068 0 : spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
5069 : {
5070 : int i;
5071 :
5072 0 : memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
5073 :
5074 0 : spdk_spin_lock(&bdev->internal.spinlock);
5075 0 : if (bdev->internal.qos) {
5076 0 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
5077 0 : if (bdev->internal.qos->rate_limits[i].limit !=
5078 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
5079 0 : limits[i] = bdev->internal.qos->rate_limits[i].limit;
5080 0 : if (bdev_qos_is_iops_rate_limit(i) == false) {
5081 : /* Change from Byte to Megabyte which is user visible. */
5082 0 : limits[i] = limits[i] / 1024 / 1024;
5083 0 : }
5084 0 : }
5085 0 : }
5086 0 : }
5087 0 : spdk_spin_unlock(&bdev->internal.spinlock);
5088 0 : }
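 :
 : /*
 :  * Worked example for the conversion above (illustrative): an internal limit
 :  * of 10485760 bytes/sec is reported to the user as
 :  * 10485760 / 1024 / 1024 = 10 MB/s.
 :  */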
5089 :
5090 : size_t
5091 280 : spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
5092 : {
5093 280 : return 1 << bdev->required_alignment;
5094 : }
5095 :
5096 : uint32_t
5097 0 : spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
5098 : {
5099 0 : return bdev->optimal_io_boundary;
5100 : }
5101 :
5102 : bool
5103 0 : spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
5104 : {
5105 0 : return bdev->write_cache;
5106 : }
5107 :
5108 : const struct spdk_uuid *
5109 0 : spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
5110 : {
5111 0 : return &bdev->uuid;
5112 : }
5113 :
5114 : uint16_t
5115 0 : spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
5116 : {
5117 0 : return bdev->acwu;
5118 : }
5119 :
5120 : uint32_t
5121 29 : spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
5122 : {
5123 29 : return bdev->md_len;
5124 : }
5125 :
5126 : bool
5127 135 : spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
5128 : {
5129 135 : return (bdev->md_len != 0) && bdev->md_interleave;
5130 : }
5131 :
5132 : bool
5133 117 : spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
5134 : {
5135 117 : return (bdev->md_len != 0) && !bdev->md_interleave;
5136 : }
5137 :
5138 : bool
5139 0 : spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
5140 : {
5141 0 : return bdev->zoned;
5142 : }
5143 :
5144 : uint32_t
5145 126 : spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
5146 : {
5147 126 : if (spdk_bdev_is_md_interleaved(bdev)) {
5148 0 : return bdev->blocklen - bdev->md_len;
5149 : } else {
5150 126 : return bdev->blocklen;
5151 : }
5152 126 : }
5153 :
5154 : uint32_t
5155 0 : spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
5156 : {
5157 0 : return bdev->phys_blocklen;
5158 : }
5159 :
5160 : static uint32_t
5161 9 : _bdev_get_block_size_with_md(const struct spdk_bdev *bdev)
5162 : {
5163 9 : if (!spdk_bdev_is_md_interleaved(bdev)) {
5164 6 : return bdev->blocklen + bdev->md_len;
5165 : } else {
5166 3 : return bdev->blocklen;
5167 : }
5168 9 : }
5169 :
5170 : /* We have to use the typedef in the function declaration to appease astyle. */
5171 : typedef enum spdk_dif_type spdk_dif_type_t;
5172 : typedef enum spdk_dif_pi_format spdk_dif_pi_format_t;
5173 :
5174 : spdk_dif_type_t
5175 0 : spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
5176 : {
5177 0 : if (bdev->md_len != 0) {
5178 0 : return bdev->dif_type;
5179 : } else {
5180 0 : return SPDK_DIF_DISABLE;
5181 : }
5182 0 : }
5183 :
5184 : spdk_dif_pi_format_t
5185 0 : spdk_bdev_get_dif_pi_format(const struct spdk_bdev *bdev)
5186 : {
5187 0 : return bdev->dif_pi_format;
5188 : }
5189 :
5190 : bool
5191 0 : spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
5192 : {
5193 0 : if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
5194 0 : return bdev->dif_is_head_of_md;
5195 : } else {
5196 0 : return false;
5197 : }
5198 0 : }
5199 :
5200 : bool
5201 0 : spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
5202 : enum spdk_dif_check_type check_type)
5203 : {
5204 0 : if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
5205 0 : return false;
5206 : }
5207 :
5208 0 : switch (check_type) {
5209 : case SPDK_DIF_CHECK_TYPE_REFTAG:
5210 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) != 0;
5211 : case SPDK_DIF_CHECK_TYPE_APPTAG:
5212 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) != 0;
5213 : case SPDK_DIF_CHECK_TYPE_GUARD:
5214 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_GUARD_CHECK) != 0;
5215 : default:
5216 0 : return false;
5217 : }
5218 0 : }
5219 :
5220 : static uint32_t
5221 3 : bdev_get_max_write(const struct spdk_bdev *bdev, uint64_t num_bytes)
5222 : {
5223 : uint64_t aligned_length, max_write_blocks;
5224 :
5225 3 : aligned_length = num_bytes - (spdk_bdev_get_buf_align(bdev) - 1);
5226 3 : max_write_blocks = aligned_length / _bdev_get_block_size_with_md(bdev);
5227 3 : max_write_blocks -= max_write_blocks % bdev->write_unit_size;
5228 :
5229 3 : return max_write_blocks;
5230 : }
5231 :
5232 : uint32_t
5233 1 : spdk_bdev_get_max_copy(const struct spdk_bdev *bdev)
5234 : {
5235 1 : return bdev->max_copy;
5236 : }
5237 :
5238 : uint64_t
5239 0 : spdk_bdev_get_qd(const struct spdk_bdev *bdev)
5240 : {
5241 0 : return bdev->internal.measured_queue_depth;
5242 : }
5243 :
5244 : uint64_t
5245 0 : spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
5246 : {
5247 0 : return bdev->internal.period;
5248 : }
5249 :
5250 : uint64_t
5251 0 : spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
5252 : {
5253 0 : return bdev->internal.weighted_io_time;
5254 : }
5255 :
5256 : uint64_t
5257 0 : spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
5258 : {
5259 0 : return bdev->internal.io_time;
5260 : }
5261 :
5262 : union spdk_bdev_nvme_ctratt
5262 0 : spdk_bdev_get_nvme_ctratt(struct spdk_bdev *bdev)
5263 : {
5264 0 : return bdev->ctratt;
5265 : }
5266 :
5267 : uint32_t
5268 0 : spdk_bdev_get_nvme_nsid(struct spdk_bdev *bdev)
5269 : {
5270 0 : return bdev->nsid;
5271 : }
5272 :
5273 : uint32_t
5274 0 : spdk_bdev_desc_get_block_size(struct spdk_bdev_desc *desc)
5275 : {
5276 0 : struct spdk_bdev *bdev = desc->bdev;
5277 :
5278 0 : return desc->opts.hide_metadata ? bdev->blocklen - bdev->md_len : bdev->blocklen;
5279 : }
5280 :
5281 : uint32_t
5282 0 : spdk_bdev_desc_get_md_size(struct spdk_bdev_desc *desc)
5283 : {
5284 0 : struct spdk_bdev *bdev = desc->bdev;
5285 :
5286 0 : return desc->opts.hide_metadata ? 0 : bdev->md_len;
5287 : }
5288 :
5289 : bool
5290 0 : spdk_bdev_desc_is_md_interleaved(struct spdk_bdev_desc *desc)
5291 : {
5292 0 : struct spdk_bdev *bdev = desc->bdev;
5293 :
5294 0 : return desc->opts.hide_metadata ? false : spdk_bdev_is_md_interleaved(bdev);
5295 : }
5296 :
5297 : bool
5298 0 : spdk_bdev_desc_is_md_separate(struct spdk_bdev_desc *desc)
5299 : {
5300 0 : struct spdk_bdev *bdev = desc->bdev;
5301 :
5302 0 : return desc->opts.hide_metadata ? false : spdk_bdev_is_md_separate(bdev);
5303 : }
5304 :
5305 : spdk_dif_type_t
5306 0 : spdk_bdev_desc_get_dif_type(struct spdk_bdev_desc *desc)
5307 : {
5308 0 : struct spdk_bdev *bdev = desc->bdev;
5309 :
5310 0 : return desc->opts.hide_metadata ? SPDK_DIF_DISABLE : spdk_bdev_get_dif_type(bdev);
5311 : }
5312 :
5313 : spdk_dif_pi_format_t
5314 0 : spdk_bdev_desc_get_dif_pi_format(struct spdk_bdev_desc *desc)
5315 : {
5316 0 : struct spdk_bdev *bdev = desc->bdev;
5317 :
5318 0 : return desc->opts.hide_metadata ? SPDK_DIF_PI_FORMAT_16 : spdk_bdev_get_dif_pi_format(bdev);
5319 : }
5320 :
5321 : bool
5322 0 : spdk_bdev_desc_is_dif_head_of_md(struct spdk_bdev_desc *desc)
5323 : {
5324 0 : struct spdk_bdev *bdev = desc->bdev;
5325 :
5326 0 : return desc->opts.hide_metadata ? false : spdk_bdev_is_dif_head_of_md(bdev);
5327 : }
5328 :
5329 : bool
5330 0 : spdk_bdev_desc_is_dif_check_enabled(struct spdk_bdev_desc *desc,
5331 : enum spdk_dif_check_type check_type)
5332 : {
5333 0 : struct spdk_bdev *bdev = desc->bdev;
5334 :
5335 0 : return desc->opts.hide_metadata ? false : spdk_bdev_is_dif_check_enabled(bdev, check_type);
5336 : }
5337 :
5338 : static void bdev_update_qd_sampling_period(void *ctx);
5339 :
5340 : static void
5341 1 : _calculate_measured_qd_cpl(struct spdk_bdev *bdev, void *_ctx, int status)
5342 : {
5343 1 : bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
5344 :
5345 1 : if (bdev->internal.measured_queue_depth) {
5346 0 : bdev->internal.io_time += bdev->internal.period;
5347 0 : bdev->internal.weighted_io_time += bdev->internal.period * bdev->internal.measured_queue_depth;
5348 0 : }
5349 :
5350 1 : bdev->internal.qd_poll_in_progress = false;
5351 :
5352 1 : bdev_update_qd_sampling_period(bdev);
5353 1 : }
5354 :
5355 : static void
5356 1 : _calculate_measured_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5357 : struct spdk_io_channel *io_ch, void *_ctx)
5358 : {
5359 1 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(io_ch);
5360 :
5361 1 : bdev->internal.temporary_queue_depth += ch->io_outstanding;
5362 1 : spdk_bdev_for_each_channel_continue(i, 0);
5363 1 : }
5364 :
5365 : static int
5366 1 : bdev_calculate_measured_queue_depth(void *ctx)
5367 : {
5368 1 : struct spdk_bdev *bdev = ctx;
5369 :
5370 1 : bdev->internal.qd_poll_in_progress = true;
5371 1 : bdev->internal.temporary_queue_depth = 0;
5372 1 : spdk_bdev_for_each_channel(bdev, _calculate_measured_qd, bdev, _calculate_measured_qd_cpl);
5373 1 : return SPDK_POLLER_BUSY;
5374 : }
5375 :
5376 : static void
5377 5 : bdev_update_qd_sampling_period(void *ctx)
5378 : {
5379 5 : struct spdk_bdev *bdev = ctx;
5380 :
5381 5 : if (bdev->internal.period == bdev->internal.new_period) {
5382 0 : return;
5383 : }
5384 :
5385 5 : if (bdev->internal.qd_poll_in_progress) {
5386 1 : return;
5387 : }
5388 :
5389 4 : bdev->internal.period = bdev->internal.new_period;
5390 :
5391 4 : spdk_poller_unregister(&bdev->internal.qd_poller);
5392 4 : if (bdev->internal.period != 0) {
5393 2 : bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5394 : bdev, bdev->internal.period);
5395 2 : } else {
5396 2 : spdk_bdev_close(bdev->internal.qd_desc);
5397 2 : bdev->internal.qd_desc = NULL;
5398 : }
5399 5 : }
5400 :
5401 : static void
5402 0 : _tmp_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
5403 : {
5404 0 : SPDK_NOTICELOG("Unexpected event type: %d\n", type);
5405 0 : }
5406 :
5407 : void
5408 135 : spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
5409 : {
5410 : int rc;
5411 :
5412 135 : if (bdev->internal.new_period == period) {
5413 129 : return;
5414 : }
5415 :
5416 6 : bdev->internal.new_period = period;
5417 :
5418 6 : if (bdev->internal.qd_desc != NULL) {
5419 4 : assert(bdev->internal.period != 0);
5420 :
5421 8 : spdk_thread_send_msg(bdev->internal.qd_desc->thread,
5422 4 : bdev_update_qd_sampling_period, bdev);
5423 4 : return;
5424 : }
5425 :
5426 2 : assert(bdev->internal.period == 0);
5427 :
5428 4 : rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false, _tmp_bdev_event_cb,
5429 2 : NULL, &bdev->internal.qd_desc);
5430 2 : if (rc != 0) {
5431 0 : return;
5432 : }
5433 :
5434 2 : bdev->internal.period = period;
5435 2 : bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5436 : bdev, period);
5437 135 : }
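 :
 : /*
 :  * Usage sketch (illustrative): enable queue-depth sampling with a 1 ms
 :  * period, then read the most recent measurement after at least one poll
 :  * cycle has completed.
 :  *
 :  *   spdk_bdev_set_qd_sampling_period(bdev, 1000);   - period is in microseconds
 :  *   uint64_t qd = spdk_bdev_get_qd(bdev);
 :  */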
5438 :
5439 : struct bdev_get_current_qd_ctx {
5440 : uint64_t current_qd;
5441 : spdk_bdev_get_current_qd_cb cb_fn;
5442 : void *cb_arg;
5443 : };
5444 :
5445 : static void
5446 0 : bdev_get_current_qd_done(struct spdk_bdev *bdev, void *_ctx, int status)
5447 : {
5448 0 : struct bdev_get_current_qd_ctx *ctx = _ctx;
5449 :
5450 0 : ctx->cb_fn(bdev, ctx->current_qd, ctx->cb_arg, 0);
5451 :
5452 0 : free(ctx);
5453 0 : }
5454 :
5455 : static void
5456 0 : bdev_get_current_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5457 : struct spdk_io_channel *io_ch, void *_ctx)
5458 : {
5459 0 : struct bdev_get_current_qd_ctx *ctx = _ctx;
5460 0 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
5461 :
5462 0 : ctx->current_qd += bdev_ch->io_outstanding;
5463 :
5464 0 : spdk_bdev_for_each_channel_continue(i, 0);
5465 0 : }
5466 :
5467 : void
5468 0 : spdk_bdev_get_current_qd(struct spdk_bdev *bdev, spdk_bdev_get_current_qd_cb cb_fn,
5469 : void *cb_arg)
5470 : {
5471 : struct bdev_get_current_qd_ctx *ctx;
5472 :
5473 0 : assert(cb_fn != NULL);
5474 :
5475 0 : ctx = calloc(1, sizeof(*ctx));
5476 0 : if (ctx == NULL) {
5477 0 : cb_fn(bdev, 0, cb_arg, -ENOMEM);
5478 0 : return;
5479 : }
5480 :
5481 0 : ctx->cb_fn = cb_fn;
5482 0 : ctx->cb_arg = cb_arg;
5483 :
5484 0 : spdk_bdev_for_each_channel(bdev, bdev_get_current_qd, ctx, bdev_get_current_qd_done);
5485 0 : }
5486 :
5487 : static void
5488 25 : _event_notify(struct spdk_bdev_desc *desc, enum spdk_bdev_event_type type)
5489 : {
5490 25 : assert(desc->thread == spdk_get_thread());
5491 :
5492 25 : spdk_spin_lock(&desc->spinlock);
5493 25 : desc->refs--;
5494 25 : if (!desc->closed) {
5495 14 : spdk_spin_unlock(&desc->spinlock);
5496 28 : desc->callback.event_fn(type,
5497 14 : desc->bdev,
5498 14 : desc->callback.ctx);
5499 14 : return;
5500 11 : } else if (desc->refs == 0) {
5501 : /* This descriptor was closed after this event_notify message was sent.
5502 : * spdk_bdev_close() could not free the descriptor since this message was
5503 : * in flight, so we free it now using bdev_desc_free().
5504 : */
5505 10 : spdk_spin_unlock(&desc->spinlock);
5506 10 : bdev_desc_free(desc);
5507 10 : return;
5508 : }
5509 1 : spdk_spin_unlock(&desc->spinlock);
5510 25 : }
5511 :
5512 : static void
5513 25 : event_notify(struct spdk_bdev_desc *desc, spdk_msg_fn event_notify_fn)
5514 : {
5515 25 : spdk_spin_lock(&desc->spinlock);
5516 25 : desc->refs++;
5517 25 : spdk_thread_send_msg(desc->thread, event_notify_fn, desc);
5518 25 : spdk_spin_unlock(&desc->spinlock);
5519 25 : }
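 :
 : /*
 :  * The refs handshake above keeps a descriptor alive while an event message
 :  * is in flight: event_notify() takes a reference before sending, and
 :  * _event_notify() drops it on the descriptor's thread, freeing the
 :  * descriptor only if it was closed in the meantime and this was the last
 :  * reference.
 :  */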
5520 :
5521 : static void
5522 6 : _resize_notify(void *ctx)
5523 : {
5524 6 : struct spdk_bdev_desc *desc = ctx;
5525 :
5526 6 : _event_notify(desc, SPDK_BDEV_EVENT_RESIZE);
5527 6 : }
5528 :
5529 : int
5530 11 : spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
5531 : {
5532 : struct spdk_bdev_desc *desc;
5533 : int ret;
5534 :
5535 11 : if (size == bdev->blockcnt) {
5536 0 : return 0;
5537 : }
5538 :
5539 11 : spdk_spin_lock(&bdev->internal.spinlock);
5540 :
5541 : /* bdev has open descriptors */
5542 11 : if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
5543 7 : bdev->blockcnt > size) {
5544 1 : ret = -EBUSY;
5545 1 : } else {
5546 10 : bdev->blockcnt = size;
5547 16 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
5548 6 : event_notify(desc, _resize_notify);
5549 6 : }
5550 10 : ret = 0;
5551 : }
5552 :
5553 11 : spdk_spin_unlock(&bdev->internal.spinlock);
5554 :
5555 11 : return ret;
5556 11 : }
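 :
 : /*
 :  * Consumer-side sketch of handling the resize notification sent above
 :  * (hypothetical handler; the spdk_bdev_ and SPDK_NOTICELOG names are real):
 :  *
 :  *   static void
 :  *   my_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
 :  *   {
 :  *           if (type == SPDK_BDEV_EVENT_RESIZE) {
 :  *                   SPDK_NOTICELOG("bdev %s resized to %" PRIu64 " blocks\n",
 :  *                                  spdk_bdev_get_name(bdev),
 :  *                                  spdk_bdev_get_num_blocks(bdev));
 :  *           }
 :  *   }
 :  */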
5557 :
5558 : /*
5559 : * Convert I/O offset and length from bytes to blocks.
5560 : *
5561 : * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
5562 : */
5563 : static uint64_t
5564 20 : bdev_bytes_to_blocks(struct spdk_bdev_desc *desc, uint64_t offset_bytes,
5565 : uint64_t *offset_blocks, uint64_t num_bytes, uint64_t *num_blocks)
5566 : {
5567 20 : uint32_t block_size = bdev_desc_get_block_size(desc);
5568 : uint8_t shift_cnt;
5569 :
5570 : /* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
5571 20 : if (spdk_likely(spdk_u32_is_pow2(block_size))) {
5572 17 : shift_cnt = spdk_u32log2(block_size);
5573 17 : *offset_blocks = offset_bytes >> shift_cnt;
5574 17 : *num_blocks = num_bytes >> shift_cnt;
5575 34 : return (offset_bytes - (*offset_blocks << shift_cnt)) |
5576 17 : (num_bytes - (*num_blocks << shift_cnt));
5577 : } else {
5578 3 : *offset_blocks = offset_bytes / block_size;
5579 3 : *num_blocks = num_bytes / block_size;
5580 3 : return (offset_bytes % block_size) | (num_bytes % block_size);
5581 : }
5582 20 : }
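 :
 : /*
 :  * Worked example (illustrative): with a 512-byte block size (a power of two,
 :  * so the shift path is taken), offset_bytes = 4096 and num_bytes = 8192
 :  * yield offset_blocks = 8, num_blocks = 16 and a return value of 0;
 :  * num_bytes = 8200 leaves a remainder of 8, so the non-zero return rejects
 :  * the request.
 :  */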
5583 :
5584 : static bool
5585 689 : bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
5586 : {
5587 : /* Return failure if offset_blocks + num_blocks is less than offset_blocks; indicates there
5588 : * has been an overflow and hence the offset has been wrapped around */
5589 689 : if (offset_blocks + num_blocks < offset_blocks) {
5590 1 : return false;
5591 : }
5592 :
5593 : /* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
5594 688 : if (offset_blocks + num_blocks > bdev->blockcnt) {
5595 2 : return false;
5596 : }
5597 :
5598 686 : return true;
5599 689 : }
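 :
 : /*
 :  * Overflow example for the wrap-around check above (illustrative): with
 :  * offset_blocks = UINT64_MAX - 1 and num_blocks = 4, the unsigned sum wraps
 :  * to 2, which is less than offset_blocks, so the request is rejected even
 :  * though the wrapped sum would pass the blockcnt comparison.
 :  */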
5600 :
5601 : static void
5602 2 : bdev_seek_complete_cb(void *ctx)
5603 : {
5604 2 : struct spdk_bdev_io *bdev_io = ctx;
5605 :
5606 2 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5607 2 : bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
5608 2 : }
5609 :
5610 : static int
5611 4 : bdev_seek(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5612 : uint64_t offset_blocks, enum spdk_bdev_io_type io_type,
5613 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5614 : {
5615 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5616 : struct spdk_bdev_io *bdev_io;
5617 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5618 :
5619 4 : assert(io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA || io_type == SPDK_BDEV_IO_TYPE_SEEK_HOLE);
5620 :
5621 : /* Check if offset_blocks is valid looking at the validity of one block */
5622 4 : if (!bdev_io_valid_blocks(bdev, offset_blocks, 1)) {
5623 0 : return -EINVAL;
5624 : }
5625 :
5626 4 : bdev_io = bdev_channel_get_io(channel);
5627 4 : if (!bdev_io) {
5628 0 : return -ENOMEM;
5629 : }
5630 :
5631 4 : bdev_io->internal.ch = channel;
5632 4 : bdev_io->internal.desc = desc;
5633 4 : bdev_io->type = io_type;
5634 4 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5635 4 : bdev_io->u.bdev.memory_domain = NULL;
5636 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5637 4 : bdev_io->u.bdev.accel_sequence = NULL;
5638 4 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5639 :
5640 4 : if (!spdk_bdev_io_type_supported(bdev, io_type)) {
5641 : /* If the bdev doesn't support seeking to the next data/hole offset,
5642 : * assume that only data and no holes are present */
5643 2 : if (io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
5644 1 : bdev_io->u.bdev.seek.offset = offset_blocks;
5645 1 : } else {
5646 1 : bdev_io->u.bdev.seek.offset = UINT64_MAX;
5647 : }
5648 :
5649 2 : spdk_thread_send_msg(spdk_get_thread(), bdev_seek_complete_cb, bdev_io);
5650 2 : return 0;
5651 : }
5652 :
5653 2 : bdev_io_submit(bdev_io);
5654 2 : return 0;
5655 4 : }
5656 :
5657 : int
5658 2 : spdk_bdev_seek_data(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5659 : uint64_t offset_blocks,
5660 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5661 : {
5662 2 : return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_DATA, cb, cb_arg);
5663 : }
5664 :
5665 : int
5666 2 : spdk_bdev_seek_hole(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5667 : uint64_t offset_blocks,
5668 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5669 : {
5670 2 : return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_HOLE, cb, cb_arg);
5671 : }
5672 :
5673 : uint64_t
5674 4 : spdk_bdev_io_get_seek_offset(const struct spdk_bdev_io *bdev_io)
5675 : {
5676 4 : return bdev_io->u.bdev.seek.offset;
5677 : }
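 :
 : /*
 :  * Usage sketch for the seek API (illustrative; seek_done and start_block
 :  * are hypothetical):
 :  *
 :  *   static void
 :  *   seek_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 :  *   {
 :  *           uint64_t offset = spdk_bdev_io_get_seek_offset(bdev_io);
 :  *
 :  *           if (success && offset != UINT64_MAX) {
 :  *                   SPDK_NOTICELOG("next data at block %" PRIu64 "\n", offset);
 :  *           }
 :  *           spdk_bdev_free_io(bdev_io);
 :  *   }
 :  *
 :  *   spdk_bdev_seek_data(desc, ch, start_block, seek_done, NULL);
 :  */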
5678 :
5679 : static int
5680 204 : bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
5681 : void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5682 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5683 : {
5684 204 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5685 : struct spdk_bdev_io *bdev_io;
5686 204 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5687 :
5688 204 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5689 0 : return -EINVAL;
5690 : }
5691 :
5692 204 : bdev_io = bdev_channel_get_io(channel);
5693 204 : if (!bdev_io) {
5694 1 : return -ENOMEM;
5695 : }
5696 :
5697 203 : bdev_io->internal.ch = channel;
5698 203 : bdev_io->internal.desc = desc;
5699 203 : bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5700 203 : bdev_io->u.bdev.iovs = &bdev_io->iov;
5701 203 : bdev_io->u.bdev.iovs[0].iov_base = buf;
5702 203 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev_desc_get_block_size(desc);
5703 203 : bdev_io->u.bdev.iovcnt = 1;
5704 203 : bdev_io->u.bdev.md_buf = md_buf;
5705 203 : bdev_io->u.bdev.num_blocks = num_blocks;
5706 203 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5707 203 : bdev_io->u.bdev.memory_domain = NULL;
5708 203 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5709 203 : bdev_io->u.bdev.accel_sequence = NULL;
5710 203 : bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5711 203 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5712 :
5713 203 : bdev_io_submit(bdev_io);
5714 203 : return 0;
5715 204 : }
5716 :
5717 : int
5718 3 : spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5719 : void *buf, uint64_t offset, uint64_t nbytes,
5720 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5721 : {
5722 : uint64_t offset_blocks, num_blocks;
5723 :
5724 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5725 0 : return -EINVAL;
5726 : }
5727 :
5728 3 : return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5729 3 : }
5730 :
5731 : int
5732 200 : spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5733 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5734 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5735 : {
5736 200 : return bdev_read_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks, cb, cb_arg);
5737 : }
5738 :
5739 : int
5740 4 : spdk_bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5741 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5742 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5743 : {
5744 8 : struct iovec iov = {
5745 4 : .iov_base = buf,
5746 : };
5747 :
5748 4 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5749 0 : return -EINVAL;
5750 : }
5751 :
5752 4 : if ((md_buf || desc->opts.hide_metadata) && !_is_buf_allocated(&iov)) {
5753 0 : return -EINVAL;
5754 : }
5755 :
5756 8 : return bdev_read_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
5757 4 : cb, cb_arg);
5758 4 : }
5759 :
5760 : int
5761 5 : spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5762 : struct iovec *iov, int iovcnt,
5763 : uint64_t offset, uint64_t nbytes,
5764 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5765 : {
5766 : uint64_t offset_blocks, num_blocks;
5767 :
5768 5 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5769 0 : return -EINVAL;
5770 : }
5771 :
5772 5 : return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
5773 5 : }
5774 :
5775 : static int
5776 226 : bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5777 : struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
5778 : uint64_t num_blocks, struct spdk_memory_domain *domain, void *domain_ctx,
5779 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
5780 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5781 : {
5782 226 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5783 : struct spdk_bdev_io *bdev_io;
5784 226 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5785 :
5786 226 : if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
5787 0 : return -EINVAL;
5788 : }
5789 :
5790 226 : bdev_io = bdev_channel_get_io(channel);
5791 226 : if (spdk_unlikely(!bdev_io)) {
5792 2 : return -ENOMEM;
5793 : }
5794 :
5795 224 : bdev_io->internal.ch = channel;
5796 224 : bdev_io->internal.desc = desc;
5797 224 : bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5798 224 : bdev_io->u.bdev.iovs = iov;
5799 224 : bdev_io->u.bdev.iovcnt = iovcnt;
5800 224 : bdev_io->u.bdev.md_buf = md_buf;
5801 224 : bdev_io->u.bdev.num_blocks = num_blocks;
5802 224 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5803 224 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5804 :
5805 224 : if (seq != NULL) {
5806 0 : bdev_io->internal.f.has_accel_sequence = true;
5807 0 : bdev_io->internal.accel_sequence = seq;
5808 0 : }
5809 :
5810 224 : if (domain != NULL) {
5811 2 : bdev_io->internal.f.has_memory_domain = true;
5812 2 : bdev_io->internal.memory_domain = domain;
5813 2 : bdev_io->internal.memory_domain_ctx = domain_ctx;
5814 2 : }
5815 :
5816 224 : bdev_io->u.bdev.memory_domain = domain;
5817 224 : bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
5818 224 : bdev_io->u.bdev.accel_sequence = seq;
5819 224 : bdev_io->u.bdev.dif_check_flags = dif_check_flags;
5820 :
5821 224 : _bdev_io_submit_ext(desc, bdev_io);
5822 :
5823 224 : return 0;
5824 226 : }
5825 :
5826 : int
5827 21 : spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5828 : struct iovec *iov, int iovcnt,
5829 : uint64_t offset_blocks, uint64_t num_blocks,
5830 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5831 : {
5832 21 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5833 :
5834 42 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5835 21 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5836 : }
5837 :
5838 : int
5839 4 : spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5840 : struct iovec *iov, int iovcnt, void *md_buf,
5841 : uint64_t offset_blocks, uint64_t num_blocks,
5842 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5843 : {
5844 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5845 :
5846 4 : if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
5847 0 : return -EINVAL;
5848 : }
5849 :
5850 4 : if (md_buf && !_is_buf_allocated(iov)) {
5851 0 : return -EINVAL;
5852 : }
5853 :
5854 8 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5855 4 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5856 4 : }
5857 :
5858 : static inline bool
5859 14 : _bdev_io_check_opts(struct spdk_bdev_ext_io_opts *opts, struct iovec *iov)
5860 : {
5861 : /*
5862 : * We check if opts size is at least of size when we first introduced
5863 : * spdk_bdev_ext_io_opts (ac6f2bdd8d) since access to those members
5864 : * are not checked internal.
5865 : */
5866 24 : return opts->size >= offsetof(struct spdk_bdev_ext_io_opts, metadata) +
5867 14 : sizeof(opts->metadata) &&
5868 10 : opts->size <= sizeof(*opts) &&
5869 : /* When memory domain is used, the user must provide data buffers */
5870 8 : (!opts->memory_domain || (iov && iov[0].iov_base));
5871 : }
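 :
 : /*
 :  * Caller-side sketch of the sizing contract checked above (illustrative;
 :  * domain, iov, iovcnt, offset, num, cb and arg are hypothetical locals):
 :  * the caller sets opts.size so newer library code can tell which trailing
 :  * members actually exist.
 :  *
 :  *   struct spdk_bdev_ext_io_opts opts = {};
 :  *
 :  *   opts.size = sizeof(opts);
 :  *   opts.memory_domain = domain;   - requires pre-allocated data buffers
 :  *   spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset, num, cb, arg, &opts);
 :  */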
5872 :
5873 : int
5874 8 : spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5875 : struct iovec *iov, int iovcnt,
5876 : uint64_t offset_blocks, uint64_t num_blocks,
5877 : spdk_bdev_io_completion_cb cb, void *cb_arg,
5878 : struct spdk_bdev_ext_io_opts *opts)
5879 : {
5880 8 : struct spdk_memory_domain *domain = NULL;
5881 8 : struct spdk_accel_sequence *seq = NULL;
5882 8 : void *domain_ctx = NULL, *md = NULL;
5883 8 : uint32_t dif_check_flags = 0;
5884 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5885 :
5886 8 : if (opts) {
5887 7 : if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
5888 3 : return -EINVAL;
5889 : }
5890 :
5891 4 : md = opts->metadata;
5892 4 : domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
5893 4 : domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
5894 4 : seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
5895 4 : if (md) {
5896 4 : if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
5897 0 : return -EINVAL;
5898 : }
5899 :
5900 4 : if (spdk_unlikely(!_is_buf_allocated(iov))) {
5901 0 : return -EINVAL;
5902 : }
5903 :
5904 4 : if (spdk_unlikely(seq != NULL)) {
5905 0 : return -EINVAL;
5906 : }
5907 4 : }
5908 4 : }
5909 :
5910 10 : dif_check_flags = bdev->dif_check_flags &
5911 5 : ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
5912 :
5913 10 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
5914 5 : num_blocks, domain, domain_ctx, seq, dif_check_flags, cb, cb_arg);
5915 8 : }
5916 :
5917 : static int
5918 36 : bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5919 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5920 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5921 : {
5922 36 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5923 : struct spdk_bdev_io *bdev_io;
5924 36 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5925 :
5926 36 : if (!desc->write) {
5927 0 : return -EBADF;
5928 : }
5929 :
5930 36 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5931 0 : return -EINVAL;
5932 : }
5933 :
5934 36 : bdev_io = bdev_channel_get_io(channel);
5935 36 : if (!bdev_io) {
5936 0 : return -ENOMEM;
5937 : }
5938 :
5939 36 : bdev_io->internal.ch = channel;
5940 36 : bdev_io->internal.desc = desc;
5941 36 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
5942 36 : bdev_io->u.bdev.iovs = &bdev_io->iov;
5943 36 : bdev_io->u.bdev.iovs[0].iov_base = buf;
5944 36 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev_desc_get_block_size(desc);
5945 36 : bdev_io->u.bdev.iovcnt = 1;
5946 36 : bdev_io->u.bdev.md_buf = md_buf;
5947 36 : bdev_io->u.bdev.num_blocks = num_blocks;
5948 36 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5949 36 : bdev_io->u.bdev.memory_domain = NULL;
5950 36 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5951 36 : bdev_io->u.bdev.accel_sequence = NULL;
5952 36 : bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5953 36 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5954 :
5955 36 : bdev_io_submit(bdev_io);
5956 36 : return 0;
5957 36 : }
5958 :
5959 : int
5960 3 : spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5961 : void *buf, uint64_t offset, uint64_t nbytes,
5962 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5963 : {
5964 : uint64_t offset_blocks, num_blocks;
5965 :
5966 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5967 0 : return -EINVAL;
5968 : }
5969 :
5970 3 : return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5971 3 : }
5972 :
5973 : int
5974 27 : spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5975 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5976 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5977 : {
5978 54 : return bdev_write_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
5979 27 : cb, cb_arg);
5980 : }
5981 :
5982 : int
5983 3 : spdk_bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5984 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5985 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5986 : {
5987 6 : struct iovec iov = {
5988 3 : .iov_base = buf,
5989 : };
5990 :
5991 3 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5992 0 : return -EINVAL;
5993 : }
5994 :
5995 3 : if (md_buf && !_is_buf_allocated(&iov)) {
5996 0 : return -EINVAL;
5997 : }
5998 :
5999 6 : return bdev_write_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
6000 3 : cb, cb_arg);
6001 3 : }
6002 :
6003 : static int
6004 70 : bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6005 : struct iovec *iov, int iovcnt, void *md_buf,
6006 : uint64_t offset_blocks, uint64_t num_blocks,
6007 : struct spdk_memory_domain *domain, void *domain_ctx,
6008 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
6009 : uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
6010 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6011 : {
6012 70 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6013 : struct spdk_bdev_io *bdev_io;
6014 70 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6015 :
6016 70 : if (spdk_unlikely(!desc->write)) {
6017 0 : return -EBADF;
6018 : }
6019 :
6020 70 : if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
6021 0 : return -EINVAL;
6022 : }
6023 :
6024 70 : bdev_io = bdev_channel_get_io(channel);
6025 70 : if (spdk_unlikely(!bdev_io)) {
6026 2 : return -ENOMEM;
6027 : }
6028 :
6029 68 : bdev_io->internal.ch = channel;
6030 68 : bdev_io->internal.desc = desc;
6031 68 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
6032 68 : bdev_io->u.bdev.iovs = iov;
6033 68 : bdev_io->u.bdev.iovcnt = iovcnt;
6034 68 : bdev_io->u.bdev.md_buf = md_buf;
6035 68 : bdev_io->u.bdev.num_blocks = num_blocks;
6036 68 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6037 68 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6038 68 : if (seq != NULL) {
6039 0 : bdev_io->internal.f.has_accel_sequence = true;
6040 0 : bdev_io->internal.accel_sequence = seq;
6041 0 : }
6042 :
6043 68 : if (domain != NULL) {
6044 2 : bdev_io->internal.f.has_memory_domain = true;
6045 2 : bdev_io->internal.memory_domain = domain;
6046 2 : bdev_io->internal.memory_domain_ctx = domain_ctx;
6047 2 : }
6048 :
6049 68 : bdev_io->u.bdev.memory_domain = domain;
6050 68 : bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
6051 68 : bdev_io->u.bdev.accel_sequence = seq;
6052 68 : bdev_io->u.bdev.dif_check_flags = dif_check_flags;
6053 68 : bdev_io->u.bdev.nvme_cdw12.raw = nvme_cdw12_raw;
6054 68 : bdev_io->u.bdev.nvme_cdw13.raw = nvme_cdw13_raw;
6055 :
6056 68 : _bdev_io_submit_ext(desc, bdev_io);
6057 :
6058 68 : return 0;
6059 70 : }
6060 :
6061 : int
6062 3 : spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6063 : struct iovec *iov, int iovcnt,
6064 : uint64_t offset, uint64_t len,
6065 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6066 : {
6067 : uint64_t offset_blocks, num_blocks;
6068 :
6069 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, len, &num_blocks) != 0) {
6070 0 : return -EINVAL;
6071 : }
6072 :
6073 3 : return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
6074 3 : }
6075 :
6076 : int
6077 14 : spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6078 : struct iovec *iov, int iovcnt,
6079 : uint64_t offset_blocks, uint64_t num_blocks,
6080 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6081 : {
6082 14 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6083 :
6084 28 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
6085 14 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
6086 14 : cb, cb_arg);
6087 : }
6088 :
6089 : int
6090 1 : spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6091 : struct iovec *iov, int iovcnt, void *md_buf,
6092 : uint64_t offset_blocks, uint64_t num_blocks,
6093 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6094 : {
6095 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6096 :
6097 1 : if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
6098 0 : return -EINVAL;
6099 : }
6100 :
6101 1 : if (md_buf && !_is_buf_allocated(iov)) {
6102 0 : return -EINVAL;
6103 : }
6104 :
6105 2 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
6106 1 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
6107 1 : cb, cb_arg);
6108 1 : }
6109 :
6110 : int
6111 8 : spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6112 : struct iovec *iov, int iovcnt,
6113 : uint64_t offset_blocks, uint64_t num_blocks,
6114 : spdk_bdev_io_completion_cb cb, void *cb_arg,
6115 : struct spdk_bdev_ext_io_opts *opts)
6116 : {
6117 8 : struct spdk_memory_domain *domain = NULL;
6118 8 : struct spdk_accel_sequence *seq = NULL;
6119 8 : void *domain_ctx = NULL, *md = NULL;
6120 8 : uint32_t dif_check_flags = 0;
6121 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6122 8 : uint32_t nvme_cdw12_raw = 0;
6123 8 : uint32_t nvme_cdw13_raw = 0;
6124 :
6125 8 : if (opts) {
6126 7 : if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
6127 3 : return -EINVAL;
6128 : }
6129 4 : md = opts->metadata;
6130 4 : domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
6131 4 : domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
6132 4 : seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
6133 4 : nvme_cdw12_raw = bdev_get_ext_io_opt(opts, nvme_cdw12.raw, 0);
6134 4 : nvme_cdw13_raw = bdev_get_ext_io_opt(opts, nvme_cdw13.raw, 0);
6135 4 : if (md) {
6136 4 : if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
6137 0 : return -EINVAL;
6138 : }
6139 :
6140 4 : if (spdk_unlikely(!_is_buf_allocated(iov))) {
6141 0 : return -EINVAL;
6142 : }
6143 :
6144 4 : if (spdk_unlikely(seq != NULL)) {
6145 0 : return -EINVAL;
6146 : }
6147 4 : }
6148 4 : }
6149 :
6150 10 : dif_check_flags = bdev->dif_check_flags &
6151 5 : ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
6152 :
6153 10 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, num_blocks,
6154 5 : domain, domain_ctx, seq, dif_check_flags,
6155 5 : nvme_cdw12_raw, nvme_cdw13_raw, cb, cb_arg);
6156 8 : }
6157 :
6158 : static void
6159 11 : bdev_compare_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6160 : {
6161 11 : struct spdk_bdev_io *parent_io = cb_arg;
6162 11 : struct spdk_bdev *bdev = parent_io->bdev;
6163 11 : uint8_t *read_buf = bdev_io->u.bdev.iovs[0].iov_base;
6164 11 : int i, rc = 0;
6165 :
6166 11 : if (!success) {
6167 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6168 0 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
6169 0 : spdk_bdev_free_io(bdev_io);
6170 0 : return;
6171 : }
6172 :
6173 17 : for (i = 0; i < parent_io->u.bdev.iovcnt; i++) {
6174 22 : rc = memcmp(read_buf,
6175 11 : parent_io->u.bdev.iovs[i].iov_base,
6176 11 : parent_io->u.bdev.iovs[i].iov_len);
6177 11 : if (rc) {
6178 5 : break;
6179 : }
6180 6 : read_buf += parent_io->u.bdev.iovs[i].iov_len;
6181 6 : }
6182 :
6183 11 : if (rc == 0 && parent_io->u.bdev.md_buf && spdk_bdev_is_md_separate(bdev)) {
6184 4 : rc = memcmp(bdev_io->u.bdev.md_buf,
6185 2 : parent_io->u.bdev.md_buf,
6186 2 : spdk_bdev_get_md_size(bdev));
6187 2 : }
6188 :
6189 11 : spdk_bdev_free_io(bdev_io);
6190 :
6191 11 : if (rc == 0) {
6192 5 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6193 5 : parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
6194 5 : } else {
6195 6 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
6196 6 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
6197 : }
6198 11 : }
6199 :
6200 : static void
6201 11 : bdev_compare_do_read(void *_bdev_io)
6202 : {
6203 11 : struct spdk_bdev_io *bdev_io = _bdev_io;
6204 : int rc;
6205 :
6206 22 : rc = spdk_bdev_read_blocks(bdev_io->internal.desc,
6207 11 : spdk_io_channel_from_ctx(bdev_io->internal.ch), NULL,
6208 11 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6209 11 : bdev_compare_do_read_done, bdev_io);
6210 :
6211 11 : if (rc == -ENOMEM) {
6212 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_do_read);
6213 11 : } else if (rc != 0) {
6214 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6215 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6216 0 : }
6217 11 : }
6218 :
6219 : static int
6220 16 : bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6221 : struct iovec *iov, int iovcnt, void *md_buf,
6222 : uint64_t offset_blocks, uint64_t num_blocks,
6223 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6224 : {
6225 16 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6226 : struct spdk_bdev_io *bdev_io;
6227 16 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6228 :
6229 16 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6230 0 : return -EINVAL;
6231 : }
6232 :
6233 16 : bdev_io = bdev_channel_get_io(channel);
6234 16 : if (!bdev_io) {
6235 0 : return -ENOMEM;
6236 : }
6237 :
6238 16 : bdev_io->internal.ch = channel;
6239 16 : bdev_io->internal.desc = desc;
6240 16 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
6241 16 : bdev_io->u.bdev.iovs = iov;
6242 16 : bdev_io->u.bdev.iovcnt = iovcnt;
6243 16 : bdev_io->u.bdev.md_buf = md_buf;
6244 16 : bdev_io->u.bdev.num_blocks = num_blocks;
6245 16 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6246 16 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6247 16 : bdev_io->u.bdev.memory_domain = NULL;
6248 16 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6249 16 : bdev_io->u.bdev.accel_sequence = NULL;
6250 :
6251 16 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
6252 7 : bdev_io_submit(bdev_io);
6253 7 : return 0;
6254 : }
6255 :
6256 9 : bdev_compare_do_read(bdev_io);
6257 :
6258 9 : return 0;
6259 16 : }
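 :
 : /*
 :  * When the bdev lacks native COMPARE support, the path above emulates it:
 :  * bdev_compare_do_read() issues a read with a NULL buffer (so the bdev layer
 :  * allocates one) and memcmp's the result against the caller's iovecs,
 :  * completing with SPDK_BDEV_IO_STATUS_MISCOMPARE on any mismatch.
 :  */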
6260 :
6261 : int
6262 10 : spdk_bdev_comparev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6263 : struct iovec *iov, int iovcnt,
6264 : uint64_t offset_blocks, uint64_t num_blocks,
6265 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6266 : {
6267 20 : return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
6268 10 : num_blocks, cb, cb_arg);
6269 : }
6270 :
6271 : int
6272 6 : spdk_bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6273 : struct iovec *iov, int iovcnt, void *md_buf,
6274 : uint64_t offset_blocks, uint64_t num_blocks,
6275 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6276 : {
6277 6 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
6278 0 : return -EINVAL;
6279 : }
6280 :
6281 6 : if (md_buf && !_is_buf_allocated(iov)) {
6282 0 : return -EINVAL;
6283 : }
6284 :
6285 12 : return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
6286 6 : num_blocks, cb, cb_arg);
6287 6 : }
6288 :
6289 : static int
6290 4 : bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6291 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
6292 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6293 : {
6294 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6295 : struct spdk_bdev_io *bdev_io;
6296 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6297 :
6298 4 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6299 0 : return -EINVAL;
6300 : }
6301 :
6302 4 : bdev_io = bdev_channel_get_io(channel);
6303 4 : if (!bdev_io) {
6304 0 : return -ENOMEM;
6305 : }
6306 :
6307 4 : bdev_io->internal.ch = channel;
6308 4 : bdev_io->internal.desc = desc;
6309 4 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
6310 4 : bdev_io->u.bdev.iovs = &bdev_io->iov;
6311 4 : bdev_io->u.bdev.iovs[0].iov_base = buf;
6312 4 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev_desc_get_block_size(desc);
6313 4 : bdev_io->u.bdev.iovcnt = 1;
6314 4 : bdev_io->u.bdev.md_buf = md_buf;
6315 4 : bdev_io->u.bdev.num_blocks = num_blocks;
6316 4 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6317 4 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6318 4 : bdev_io->u.bdev.memory_domain = NULL;
6319 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6320 4 : bdev_io->u.bdev.accel_sequence = NULL;
6321 :
6322 4 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
6323 2 : bdev_io_submit(bdev_io);
6324 2 : return 0;
6325 : }
6326 :
6327 2 : bdev_compare_do_read(bdev_io);
6328 :
6329 2 : return 0;
6330 4 : }
6331 :
6332 : int
6333 4 : spdk_bdev_compare_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6334 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
6335 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6336 : {
6337 8 : return bdev_compare_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
6338 4 : cb, cb_arg);
6339 : }
6340 :
6341 : int
6342 0 : spdk_bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6343 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
6344 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6345 : {
6346 0 : struct iovec iov = {
6347 0 : .iov_base = buf,
6348 : };
6349 :
6350 0 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
6351 0 : return -EINVAL;
6352 : }
6353 :
6354 0 : if (md_buf && !_is_buf_allocated(&iov)) {
6355 0 : return -EINVAL;
6356 : }
6357 :
6358 0 : return bdev_compare_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
6359 0 : cb, cb_arg);
6360 0 : }
6361 :
6362 : static void
6363 2 : bdev_comparev_and_writev_blocks_unlocked(struct lba_range *range, void *ctx, int unlock_status)
6364 : {
6365 2 : struct spdk_bdev_io *bdev_io = ctx;
6366 :
6367 2 : if (unlock_status) {
6368 0 : SPDK_ERRLOG("LBA range unlock failed\n");
6369 0 : }
6370 :
6371 4 : bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS ? true :
6372 2 : false, bdev_io->internal.caller_ctx);
6373 2 : }
6374 :
6375 : static void
6376 2 : bdev_comparev_and_writev_blocks_unlock(struct spdk_bdev_io *bdev_io, int status)
6377 : {
6378 2 : bdev_io->internal.status = status;
6379 :
6380 4 : bdev_unlock_lba_range(bdev_io->internal.desc, spdk_io_channel_from_ctx(bdev_io->internal.ch),
6381 2 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6382 2 : bdev_comparev_and_writev_blocks_unlocked, bdev_io);
6383 2 : }
6384 :
6385 : static void
6386 1 : bdev_compare_and_write_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6387 : {
6388 1 : struct spdk_bdev_io *parent_io = cb_arg;
6389 :
6390 1 : if (!success) {
6391 0 : SPDK_ERRLOG("Compare and write operation failed\n");
6392 0 : }
6393 :
6394 1 : spdk_bdev_free_io(bdev_io);
6395 :
6396 2 : bdev_comparev_and_writev_blocks_unlock(parent_io,
6397 1 : success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
6398 1 : }
6399 :
6400 : static void
6401 1 : bdev_compare_and_write_do_write(void *_bdev_io)
6402 : {
6403 1 : struct spdk_bdev_io *bdev_io = _bdev_io;
6404 : int rc;
6405 :
6406 2 : rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
6407 1 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
6408 1 : bdev_io->u.bdev.fused_iovs, bdev_io->u.bdev.fused_iovcnt,
6409 1 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6410 1 : bdev_compare_and_write_do_write_done, bdev_io);
6411 :
6413 1 : if (rc == -ENOMEM) {
6414 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_write);
6415 1 : } else if (rc != 0) {
6416 0 : bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6417 0 : }
6418 1 : }
6419 :
6420 : static void
6421 2 : bdev_compare_and_write_do_compare_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6422 : {
6423 2 : struct spdk_bdev_io *parent_io = cb_arg;
6424 :
6425 2 : spdk_bdev_free_io(bdev_io);
6426 :
6427 2 : if (!success) {
6428 1 : bdev_comparev_and_writev_blocks_unlock(parent_io, SPDK_BDEV_IO_STATUS_MISCOMPARE);
6429 1 : return;
6430 : }
6431 :
6432 1 : bdev_compare_and_write_do_write(parent_io);
6433 2 : }
6434 :
6435 : static void
6436 2 : bdev_compare_and_write_do_compare(void *_bdev_io)
6437 : {
6438 2 : struct spdk_bdev_io *bdev_io = _bdev_io;
6439 : int rc;
6440 :
6441 4 : rc = spdk_bdev_comparev_blocks(bdev_io->internal.desc,
6442 2 : spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs,
6443 2 : bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6444 2 : bdev_compare_and_write_do_compare_done, bdev_io);
6445 :
6446 2 : if (rc == -ENOMEM) {
6447 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_compare);
6448 2 : } else if (rc != 0) {
6449 0 : bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED);
6450 0 : }
6451 2 : }
6452 :
6453 : static void
6454 2 : bdev_comparev_and_writev_blocks_locked(struct lba_range *range, void *ctx, int status)
6455 : {
6456 2 : struct spdk_bdev_io *bdev_io = ctx;
6457 :
6458 2 : if (status) {
6459 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED;
6460 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6461 0 : return;
6462 : }
6463 :
6464 2 : bdev_compare_and_write_do_compare(bdev_io);
6465 2 : }
6466 :
6467 : int
6468 2 : spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6469 : struct iovec *compare_iov, int compare_iovcnt,
6470 : struct iovec *write_iov, int write_iovcnt,
6471 : uint64_t offset_blocks, uint64_t num_blocks,
6472 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6473 : {
6474 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6475 : struct spdk_bdev_io *bdev_io;
6476 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6477 :
6478 2 : if (!desc->write) {
6479 0 : return -EBADF;
6480 : }
6481 :
6482 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6483 0 : return -EINVAL;
6484 : }
6485 :
6486 2 : if (num_blocks > bdev->acwu) {
6487 0 : return -EINVAL;
6488 : }
6489 :
6490 2 : bdev_io = bdev_channel_get_io(channel);
6491 2 : if (!bdev_io) {
6492 0 : return -ENOMEM;
6493 : }
6494 :
6495 2 : bdev_io->internal.ch = channel;
6496 2 : bdev_io->internal.desc = desc;
6497 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
6498 2 : bdev_io->u.bdev.iovs = compare_iov;
6499 2 : bdev_io->u.bdev.iovcnt = compare_iovcnt;
6500 2 : bdev_io->u.bdev.fused_iovs = write_iov;
6501 2 : bdev_io->u.bdev.fused_iovcnt = write_iovcnt;
6502 2 : bdev_io->u.bdev.md_buf = NULL;
6503 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6504 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6505 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6506 2 : bdev_io->u.bdev.memory_domain = NULL;
6507 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6508 2 : bdev_io->u.bdev.accel_sequence = NULL;
6509 :
6510 2 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
6511 0 : bdev_io_submit(bdev_io);
6512 0 : return 0;
6513 : }
6514 :
6515 4 : return bdev_lock_lba_range(desc, ch, offset_blocks, num_blocks,
6516 2 : bdev_comparev_and_writev_blocks_locked, bdev_io);
6517 2 : }
6518 :
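 : /*
 : * Usage sketch for the compare-and-write call above, from the caller's
 : * side. The helper names (caw_done, submit_caw) and the one-block sizes
 : * are illustrative assumptions, not part of this file.
 : *
 : * static void
 : * caw_done(struct spdk_bdev_io *io, bool success, void *cb_arg)
 : * {
 : *         // On a miscompare, success is false and the fused NVMe status
 : *         // (spdk_bdev_io_get_nvme_fused_status) reports COMPARE_FAILURE.
 : *         spdk_bdev_free_io(io);
 : * }
 : *
 : * static int
 : * submit_caw(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
 : *            void *expected, void *new_data, uint32_t blocklen)
 : * {
 : *         struct iovec cmp = { .iov_base = expected, .iov_len = blocklen };
 : *         struct iovec wr = { .iov_base = new_data, .iov_len = blocklen };
 : *
 : *         // num_blocks must not exceed bdev->acwu, per the check above.
 : *         return spdk_bdev_comparev_and_writev_blocks(desc, ch, &cmp, 1,
 : *                                                     &wr, 1, 0, 1,
 : *                                                     caw_done, NULL);
 : * }
 : */
 :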
6519 : int
6520 2 : spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6521 : struct iovec *iov, int iovcnt,
6522 : uint64_t offset_blocks, uint64_t num_blocks,
6523 : bool populate,
6524 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6525 : {
6526 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6527 : struct spdk_bdev_io *bdev_io;
6528 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6529 :
6530 2 : if (!desc->write) {
6531 0 : return -EBADF;
6532 : }
6533 :
6534 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6535 0 : return -EINVAL;
6536 : }
6537 :
6538 2 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
6539 0 : return -ENOTSUP;
6540 : }
6541 :
6542 2 : bdev_io = bdev_channel_get_io(channel);
6543 2 : if (!bdev_io) {
6544 0 : return -ENOMEM;
6545 : }
6546 :
6547 2 : bdev_io->internal.ch = channel;
6548 2 : bdev_io->internal.desc = desc;
6549 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
6550 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6551 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6552 2 : bdev_io->u.bdev.iovs = iov;
6553 2 : bdev_io->u.bdev.iovcnt = iovcnt;
6554 2 : bdev_io->u.bdev.md_buf = NULL;
6555 2 : bdev_io->u.bdev.zcopy.populate = populate ? 1 : 0;
6556 2 : bdev_io->u.bdev.zcopy.commit = 0;
6557 2 : bdev_io->u.bdev.zcopy.start = 1;
6558 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6559 2 : bdev_io->u.bdev.memory_domain = NULL;
6560 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6561 2 : bdev_io->u.bdev.accel_sequence = NULL;
6562 :
6563 2 : bdev_io_submit(bdev_io);
6564 :
6565 2 : return 0;
6566 2 : }
6567 :
6568 : int
6569 2 : spdk_bdev_zcopy_end(struct spdk_bdev_io *bdev_io, bool commit,
6570 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6571 : {
6572 2 : if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY) {
6573 0 : return -EINVAL;
6574 : }
6575 :
6576 2 : bdev_io->u.bdev.zcopy.commit = commit ? 1 : 0;
6577 2 : bdev_io->u.bdev.zcopy.start = 0;
6578 2 : bdev_io->internal.caller_ctx = cb_arg;
6579 2 : bdev_io->internal.cb = cb;
6580 2 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
6581 :
6582 2 : bdev_io_submit(bdev_io);
6583 :
6584 2 : return 0;
6585 2 : }
6586 :
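 : /*
 : * Usage sketch: the zcopy pair above is a two-phase protocol. The start
 : * call hands out bdev-owned buffers (populating them for reads); the end
 : * call commits or abandons them on the same spdk_bdev_io. The callback
 : * names below are illustrative assumptions.
 : *
 : * static void
 : * zcopy_end_done(struct spdk_bdev_io *io, bool success, void *cb_arg)
 : * {
 : *         spdk_bdev_free_io(io);
 : * }
 : *
 : * static void
 : * zcopy_start_done(struct spdk_bdev_io *io, bool success, void *cb_arg)
 : * {
 : *         if (!success) {
 : *                 spdk_bdev_free_io(io);
 : *                 return;
 : *         }
 : *         // io->u.bdev.iovs now points at buffers owned by the bdev;
 : *         // fill them in, then commit the data with the end call.
 : *         spdk_bdev_zcopy_end(io, true, zcopy_end_done, NULL);
 : * }
 : */
 :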
6587 : int
6588 0 : spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6589 : uint64_t offset, uint64_t len,
6590 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6591 : {
6592 : uint64_t offset_blocks, num_blocks;
6593 :
6594 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, len, &num_blocks) != 0) {
6595 0 : return -EINVAL;
6596 : }
6597 :
6598 0 : return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6599 0 : }
6600 :
6601 : int
6602 33 : spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6603 : uint64_t offset_blocks, uint64_t num_blocks,
6604 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6605 : {
6606 33 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6607 : struct spdk_bdev_io *bdev_io;
6608 33 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6609 :
6610 33 : if (!desc->write) {
6611 0 : return -EBADF;
6612 : }
6613 :
6614 33 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6615 0 : return -EINVAL;
6616 : }
6617 :
6618 33 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
6619 10 : !bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
6620 1 : return -ENOTSUP;
6621 : }
6622 :
6623 32 : bdev_io = bdev_channel_get_io(channel);
6624 :
6625 32 : if (!bdev_io) {
6626 0 : return -ENOMEM;
6627 : }
6628 :
6629 32 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
6630 32 : bdev_io->internal.ch = channel;
6631 32 : bdev_io->internal.desc = desc;
6632 32 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6633 32 : bdev_io->u.bdev.num_blocks = num_blocks;
6634 32 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6635 32 : bdev_io->u.bdev.memory_domain = NULL;
6636 32 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6637 32 : bdev_io->u.bdev.accel_sequence = NULL;
6638 :
6639 : /* If the write_zeroes size is large and should be split, use the generic split
6640 : * logic regardless of whether SPDK_BDEV_IO_TYPE_WRITE_ZEROES is supported or not.
6641 : *
6642 : * Then, send the write_zeroes request if SPDK_BDEV_IO_TYPE_WRITE_ZEROES is supported
6643 : * or emulate it using a regular write request otherwise.
6644 : */
6645 32 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) ||
6646 9 : bdev_io->internal.f.split) {
6647 26 : bdev_io_submit(bdev_io);
6648 26 : return 0;
6649 : }
6650 :
6651 6 : assert(_bdev_get_block_size_with_md(bdev) <= ZERO_BUFFER_SIZE);
6652 :
6653 6 : return bdev_write_zero_buffer(bdev_io);
6654 33 : }
6655 :
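 : /*
 : * Usage sketch: zero a byte range in block units. When the module lacks
 : * native WRITE_ZEROES support, the request is transparently emulated with
 : * regular writes of an internal zero buffer, as implemented above. The
 : * write_done callback is an illustrative assumption.
 : *
 : * uint64_t nblocks = (1024 * 1024) /
 : *                    spdk_bdev_get_block_size(spdk_bdev_desc_get_bdev(desc));
 : * int rc = spdk_bdev_write_zeroes_blocks(desc, ch, 0, nblocks,
 : *                                        write_done, NULL);
 : * if (rc == -ENOTSUP) {
 : *         // Neither WRITE_ZEROES nor WRITE is supported by this bdev.
 : * }
 : */
 :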
6656 : int
6657 0 : spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6658 : uint64_t offset, uint64_t nbytes,
6659 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6660 : {
6661 : uint64_t offset_blocks, num_blocks;
6662 :
6663 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
6664 0 : return -EINVAL;
6665 : }
6666 :
6667 0 : return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6668 0 : }
6669 :
6670 : static void
6671 0 : bdev_io_complete_cb(void *ctx)
6672 : {
6673 0 : struct spdk_bdev_io *bdev_io = ctx;
6674 :
6675 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6676 0 : bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
6677 0 : }
6678 :
6679 : int
6680 22 : spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6681 : uint64_t offset_blocks, uint64_t num_blocks,
6682 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6683 : {
6684 22 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6685 : struct spdk_bdev_io *bdev_io;
6686 22 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6687 :
6688 22 : if (!desc->write) {
6689 0 : return -EBADF;
6690 : }
6691 :
6692 22 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6693 0 : return -EINVAL;
6694 : }
6695 :
6696 22 : bdev_io = bdev_channel_get_io(channel);
6697 22 : if (!bdev_io) {
6698 0 : return -ENOMEM;
6699 : }
6700 :
6701 22 : bdev_io->internal.ch = channel;
6702 22 : bdev_io->internal.desc = desc;
6703 22 : bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
6704 :
6705 22 : bdev_io->u.bdev.iovs = &bdev_io->iov;
6706 22 : bdev_io->u.bdev.iovs[0].iov_base = NULL;
6707 22 : bdev_io->u.bdev.iovs[0].iov_len = 0;
6708 22 : bdev_io->u.bdev.iovcnt = 1;
6709 :
6710 22 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6711 22 : bdev_io->u.bdev.num_blocks = num_blocks;
6712 22 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6713 22 : bdev_io->u.bdev.memory_domain = NULL;
6714 22 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6715 22 : bdev_io->u.bdev.accel_sequence = NULL;
6716 :
6717 22 : if (num_blocks == 0) {
6718 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
6719 0 : return 0;
6720 : }
6721 :
6722 22 : bdev_io_submit(bdev_io);
6723 22 : return 0;
6724 22 : }
6725 :
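 : /*
 : * Usage sketch: deallocate a block range. A zero-length unmap completes
 : * immediately with success via a deferred message, as handled above. The
 : * unmap_done callback is an illustrative assumption.
 : *
 : * int rc = spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks,
 : *                                 unmap_done, NULL);
 : * if (rc == -ENOMEM) {
 : *         // No spdk_bdev_io available; retry via spdk_bdev_queue_io_wait().
 : * }
 : */
 :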
6726 : int
6727 0 : spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6728 : uint64_t offset, uint64_t length,
6729 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6730 : {
6731 : uint64_t offset_blocks, num_blocks;
6732 :
6733 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, length, &num_blocks) != 0) {
6734 0 : return -EINVAL;
6735 : }
6736 :
6737 0 : return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6738 0 : }
6739 :
6740 : int
6741 2 : spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6742 : uint64_t offset_blocks, uint64_t num_blocks,
6743 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6744 : {
6745 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6746 : struct spdk_bdev_io *bdev_io;
6747 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6748 :
6749 2 : if (!desc->write) {
6750 0 : return -EBADF;
6751 : }
6752 :
6753 2 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH))) {
6754 0 : return -ENOTSUP;
6755 : }
6756 :
6757 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6758 0 : return -EINVAL;
6759 : }
6760 :
6761 2 : bdev_io = bdev_channel_get_io(channel);
6762 2 : if (!bdev_io) {
6763 0 : return -ENOMEM;
6764 : }
6765 :
6766 2 : bdev_io->internal.ch = channel;
6767 2 : bdev_io->internal.desc = desc;
6768 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
6769 2 : bdev_io->u.bdev.iovs = NULL;
6770 2 : bdev_io->u.bdev.iovcnt = 0;
6771 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6772 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6773 2 : bdev_io->u.bdev.memory_domain = NULL;
6774 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6775 2 : bdev_io->u.bdev.accel_sequence = NULL;
6776 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6777 :
6778 2 : bdev_io_submit(bdev_io);
6779 2 : return 0;
6780 2 : }
6781 :
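 : /*
 : * Usage sketch: persist previously completed writes on a bdev with a
 : * volatile write cache. Unlike unmap or write_zeroes, flush has no
 : * emulation path, so -ENOTSUP is returned when the module does not
 : * support SPDK_BDEV_IO_TYPE_FLUSH. flush_done is an illustrative name.
 : *
 : * int rc = spdk_bdev_flush_blocks(desc, ch, 0,
 : *                                 spdk_bdev_get_num_blocks(
 : *                                         spdk_bdev_desc_get_bdev(desc)),
 : *                                 flush_done, NULL);
 : */
 :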
6782 : static int bdev_reset_poll_for_outstanding_io(void *ctx);
6783 :
6784 : static void
6785 13 : bdev_reset_check_outstanding_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
6786 : {
6787 13 : struct spdk_bdev_io *bdev_io = _ctx;
6788 13 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
6789 :
6790 13 : if (status == -EBUSY) {
6791 9 : if (spdk_get_ticks() < bdev_io->u.reset.wait_poller.stop_time_tsc) {
6792 8 : bdev_io->u.reset.wait_poller.poller = SPDK_POLLER_REGISTER(bdev_reset_poll_for_outstanding_io,
6793 : bdev_io, BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
6794 8 : } else {
6795 1 : if (TAILQ_EMPTY(&ch->io_memory_domain) && TAILQ_EMPTY(&ch->io_accel_exec)) {
6796 : /* If outstanding IOs are still present and reset_io_drain_timeout
6797 : * seconds have passed, start the reset. */
6798 1 : bdev_io_submit_reset(bdev_io);
6799 1 : } else {
6800 : /* We still have an in-progress memory domain pull/push or we're
6801 : * executing an accel sequence. Since we cannot abort either of those
6802 : * operations, fail the reset request. */
6803 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6804 : }
6805 : }
6806 9 : } else {
6807 4 : SPDK_DEBUGLOG(bdev,
6808 : "Skipping reset for underlying device of bdev: %s - no outstanding I/O.\n",
6809 : ch->bdev->name);
6810 : /* Mark the completion status as a SUCCESS and complete the reset. */
6811 4 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
6812 : }
6813 13 : }
6814 :
6815 : static void
6816 13 : bdev_reset_check_outstanding_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6817 : struct spdk_io_channel *io_ch, void *_ctx)
6818 : {
6819 13 : struct spdk_bdev_channel *cur_ch = __io_ch_to_bdev_ch(io_ch);
6820 13 : int status = 0;
6821 :
6822 17 : if (cur_ch->io_outstanding > 0 ||
6823 4 : !TAILQ_EMPTY(&cur_ch->io_memory_domain) ||
6824 4 : !TAILQ_EMPTY(&cur_ch->io_accel_exec)) {
6825 : /* If a channel has outstanding IO, set status to -EBUSY code. This will stop
6826 : * further iteration over the rest of the channels and pass non-zero status
6827 : * to the callback function. */
6828 9 : status = -EBUSY;
6829 9 : }
6830 13 : spdk_bdev_for_each_channel_continue(i, status);
6831 13 : }
6832 :
6833 : static int
6834 8 : bdev_reset_poll_for_outstanding_io(void *ctx)
6835 : {
6836 8 : struct spdk_bdev_io *bdev_io = ctx;
6837 :
6838 8 : spdk_poller_unregister(&bdev_io->u.reset.wait_poller.poller);
6839 8 : spdk_bdev_for_each_channel(bdev_io->bdev, bdev_reset_check_outstanding_io, bdev_io,
6840 : bdev_reset_check_outstanding_io_done);
6841 :
6842 8 : return SPDK_POLLER_BUSY;
6843 : }
6844 :
6845 : static void
6846 16 : bdev_reset_freeze_channel_done(struct spdk_bdev *bdev, void *_ctx, int status)
6847 : {
6848 16 : struct spdk_bdev_io *bdev_io = _ctx;
6849 :
6850 16 : if (bdev->reset_io_drain_timeout == 0) {
6851 11 : bdev_io_submit_reset(bdev_io);
6852 11 : return;
6853 : }
6854 :
6855 10 : bdev_io->u.reset.wait_poller.stop_time_tsc = spdk_get_ticks() +
6856 5 : (bdev->reset_io_drain_timeout * spdk_get_ticks_hz());
6857 :
6858 : /* If bdev->reset_io_drain_timeout is non-zero, submit the reset to the
6859 : * underlying module only if outstanding I/Os remain after
6860 : * reset_io_drain_timeout seconds have passed. */
6861 5 : spdk_bdev_for_each_channel(bdev, bdev_reset_check_outstanding_io, bdev_io,
6862 : bdev_reset_check_outstanding_io_done);
6863 16 : }
6864 :
6865 : static void
6866 19 : bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6867 : struct spdk_io_channel *ch, void *_ctx)
6868 : {
6869 : struct spdk_bdev_channel *channel;
6870 : struct spdk_bdev_mgmt_channel *mgmt_channel;
6871 : struct spdk_bdev_shared_resource *shared_resource;
6872 : bdev_io_tailq_t tmp_queued;
6873 :
6874 19 : TAILQ_INIT(&tmp_queued);
6875 :
6876 19 : channel = __io_ch_to_bdev_ch(ch);
6877 19 : shared_resource = channel->shared_resource;
6878 19 : mgmt_channel = shared_resource->mgmt_ch;
6879 :
6880 19 : channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
6881 :
6882 19 : if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
6883 2 : TAILQ_SWAP(&channel->qos_queued_io, &tmp_queued, spdk_bdev_io, internal.link);
6884 2 : }
6885 :
6886 19 : bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
6887 19 : bdev_abort_all_buf_io(mgmt_channel, channel);
6888 19 : bdev_abort_all_queued_io(&tmp_queued, channel);
6889 :
6890 19 : spdk_bdev_for_each_channel_continue(i, 0);
6891 19 : }
6892 :
6893 : static void
6894 18 : bdev_start_reset(struct spdk_bdev_io *bdev_io)
6895 : {
6896 18 : struct spdk_bdev *bdev = bdev_io->bdev;
6897 18 : bool freeze_channel = false;
6898 :
6899 18 : bdev_ch_add_to_io_submitted(bdev_io);
6900 :
6901 : /**
6902 : * Take a channel reference for the target bdev for the life of this
6903 : * reset. This guards against the channel getting destroyed before
6904 : * the reset is completed. The reference is released when the reset
6905 : * finishes.
6906 : */
6907 18 : bdev_io->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
6908 :
6909 18 : spdk_spin_lock(&bdev->internal.spinlock);
6910 18 : if (bdev->internal.reset_in_progress == NULL) {
6911 16 : bdev->internal.reset_in_progress = bdev_io;
6912 16 : freeze_channel = true;
6913 16 : } else {
6914 2 : TAILQ_INSERT_TAIL(&bdev->internal.queued_resets, bdev_io, internal.link);
6915 : }
6916 18 : spdk_spin_unlock(&bdev->internal.spinlock);
6917 :
6918 18 : if (freeze_channel) {
6919 16 : spdk_bdev_for_each_channel(bdev, bdev_reset_freeze_channel, bdev_io,
6920 : bdev_reset_freeze_channel_done);
6921 16 : }
6922 18 : }
6923 :
6924 : int
6925 18 : spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6926 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6927 : {
6928 18 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6929 : struct spdk_bdev_io *bdev_io;
6930 18 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6931 :
6932 18 : bdev_io = bdev_channel_get_io(channel);
6933 18 : if (!bdev_io) {
6934 0 : return -ENOMEM;
6935 : }
6936 :
6937 18 : bdev_io->internal.ch = channel;
6938 18 : bdev_io->internal.desc = desc;
6939 18 : bdev_io->internal.submit_tsc = spdk_get_ticks();
6940 18 : bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
6941 18 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6942 :
6943 18 : bdev_start_reset(bdev_io);
6944 18 : return 0;
6945 18 : }
6946 :
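 : /*
 : * Usage sketch: a reset freezes every channel, aborts queued I/O and,
 : * when reset_io_drain_timeout is non-zero, polls up to that many seconds
 : * for outstanding I/O to drain before passing the reset to the module.
 : * Concurrent resets are queued and complete with the first one's status.
 : * The reset_done callback is an illustrative assumption.
 : *
 : * static void
 : * reset_done(struct spdk_bdev_io *io, bool success, void *cb_arg)
 : * {
 : *         spdk_bdev_free_io(io);
 : * }
 : *
 : * ... spdk_bdev_reset(desc, ch, reset_done, NULL); ...
 : */
 :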
6947 : void
6948 0 : spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
6949 : struct spdk_bdev_io_stat *stat, enum spdk_bdev_reset_stat_mode reset_mode)
6950 : {
6951 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6952 :
6953 0 : bdev_get_io_stat(stat, channel->stat);
6954 0 : spdk_bdev_reset_io_stat(channel->stat, reset_mode);
6955 0 : }
6956 :
6957 : static void
6958 5 : bdev_get_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
6959 : {
6960 5 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6961 :
6962 10 : bdev_iostat_ctx->cb(bdev, bdev_iostat_ctx->stat,
6963 5 : bdev_iostat_ctx->cb_arg, 0);
6964 5 : free(bdev_iostat_ctx);
6965 5 : }
6966 :
6967 : static void
6968 4 : bdev_get_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6969 : struct spdk_io_channel *ch, void *_ctx)
6970 : {
6971 4 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6972 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6973 :
6974 4 : spdk_bdev_add_io_stat(bdev_iostat_ctx->stat, channel->stat);
6975 4 : spdk_bdev_reset_io_stat(channel->stat, bdev_iostat_ctx->reset_mode);
6976 4 : spdk_bdev_for_each_channel_continue(i, 0);
6977 4 : }
6978 :
6979 : void
6980 5 : spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
6981 : enum spdk_bdev_reset_stat_mode reset_mode, spdk_bdev_get_device_stat_cb cb, void *cb_arg)
6982 : {
6983 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx;
6984 :
6985 5 : assert(bdev != NULL);
6986 5 : assert(stat != NULL);
6987 5 : assert(cb != NULL);
6988 :
6989 5 : bdev_iostat_ctx = calloc(1, sizeof(struct spdk_bdev_iostat_ctx));
6990 5 : if (bdev_iostat_ctx == NULL) {
6991 0 : SPDK_ERRLOG("Unable to allocate memory for spdk_bdev_iostat_ctx\n");
6992 0 : cb(bdev, stat, cb_arg, -ENOMEM);
6993 0 : return;
6994 : }
6995 :
6996 5 : bdev_iostat_ctx->stat = stat;
6997 5 : bdev_iostat_ctx->cb = cb;
6998 5 : bdev_iostat_ctx->cb_arg = cb_arg;
6999 5 : bdev_iostat_ctx->reset_mode = reset_mode;
7000 :
7001 : /* Start with the statistics from previously deleted channels. */
7002 5 : spdk_spin_lock(&bdev->internal.spinlock);
7003 5 : bdev_get_io_stat(bdev_iostat_ctx->stat, bdev->internal.stat);
7004 5 : spdk_bdev_reset_io_stat(bdev->internal.stat, reset_mode);
7005 5 : spdk_spin_unlock(&bdev->internal.spinlock);
7006 :
7007 : /* Then iterate and add the statistics from each existing channel. */
7008 5 : spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx,
7009 : bdev_get_device_stat_done);
7010 5 : }
7011 :
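 : /*
 : * Usage sketch: aggregate per-channel statistics. The stat buffer must
 : * stay valid until the callback fires; freeing it there is one workable
 : * ownership scheme. The calloc() allocation, the iostat_done name and the
 : * reset-mode choice are illustrative assumptions.
 : *
 : * static void
 : * iostat_done(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
 : *             void *cb_arg, int rc)
 : * {
 : *         if (rc == 0) {
 : *                 SPDK_NOTICELOG("%s: %" PRIu64 " bytes read\n",
 : *                                spdk_bdev_get_name(bdev), stat->bytes_read);
 : *         }
 : *         free(stat);
 : * }
 : *
 : * struct spdk_bdev_io_stat *stat = calloc(1, sizeof(*stat));
 : * spdk_bdev_get_device_stat(bdev, stat, SPDK_BDEV_RESET_STAT_NONE,
 : *                           iostat_done, NULL);
 : */
 :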
7012 : struct bdev_iostat_reset_ctx {
7013 : enum spdk_bdev_reset_stat_mode mode;
7014 : bdev_reset_device_stat_cb cb;
7015 : void *cb_arg;
7016 : };
7017 :
7018 : static void
7019 0 : bdev_reset_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
7020 : {
7021 0 : struct bdev_iostat_reset_ctx *ctx = _ctx;
7022 :
7023 0 : ctx->cb(bdev, ctx->cb_arg, 0);
7024 :
7025 0 : free(ctx);
7026 0 : }
7027 :
7028 : static void
7029 0 : bdev_reset_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
7030 : struct spdk_io_channel *ch, void *_ctx)
7031 : {
7032 0 : struct bdev_iostat_reset_ctx *ctx = _ctx;
7033 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7034 :
7035 0 : spdk_bdev_reset_io_stat(channel->stat, ctx->mode);
7036 :
7037 0 : spdk_bdev_for_each_channel_continue(i, 0);
7038 0 : }
7039 :
7040 : void
7041 0 : bdev_reset_device_stat(struct spdk_bdev *bdev, enum spdk_bdev_reset_stat_mode mode,
7042 : bdev_reset_device_stat_cb cb, void *cb_arg)
7043 : {
7044 : struct bdev_iostat_reset_ctx *ctx;
7045 :
7046 0 : assert(bdev != NULL);
7047 0 : assert(cb != NULL);
7048 :
7049 0 : ctx = calloc(1, sizeof(*ctx));
7050 0 : if (ctx == NULL) {
7051 0 : SPDK_ERRLOG("Unable to allocate bdev_iostat_reset_ctx.\n");
7052 0 : cb(bdev, cb_arg, -ENOMEM);
7053 0 : return;
7054 : }
7055 :
7056 0 : ctx->mode = mode;
7057 0 : ctx->cb = cb;
7058 0 : ctx->cb_arg = cb_arg;
7059 :
7060 0 : spdk_spin_lock(&bdev->internal.spinlock);
7061 0 : spdk_bdev_reset_io_stat(bdev->internal.stat, mode);
7062 0 : spdk_spin_unlock(&bdev->internal.spinlock);
7063 :
7064 0 : spdk_bdev_for_each_channel(bdev,
7065 : bdev_reset_each_channel_stat,
7066 0 : ctx,
7067 : bdev_reset_device_stat_done);
7068 0 : }
7069 :
7070 : int
7071 1 : spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7072 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
7073 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7074 : {
7075 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7076 : struct spdk_bdev_io *bdev_io;
7077 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7078 :
7079 1 : if (!desc->write) {
7080 0 : return -EBADF;
7081 : }
7082 :
7083 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN))) {
7084 1 : return -ENOTSUP;
7085 : }
7086 :
7087 0 : bdev_io = bdev_channel_get_io(channel);
7088 0 : if (!bdev_io) {
7089 0 : return -ENOMEM;
7090 : }
7091 :
7092 0 : bdev_io->internal.ch = channel;
7093 0 : bdev_io->internal.desc = desc;
7094 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
7095 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
7096 0 : bdev_io->u.nvme_passthru.buf = buf;
7097 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
7098 0 : bdev_io->u.nvme_passthru.md_buf = NULL;
7099 0 : bdev_io->u.nvme_passthru.md_len = 0;
7100 :
7101 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7102 :
7103 0 : bdev_io_submit(bdev_io);
7104 0 : return 0;
7105 1 : }
7106 :
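 : /*
 : * Usage sketch: send an Identify Controller admin command through the
 : * passthru path above. This requires a module that reports NVME_ADMIN
 : * support (e.g. the NVMe bdev). The admin_done callback and the 4 KiB
 : * buffer size are illustrative assumptions.
 : *
 : * uint8_t buf[4096];
 : * struct spdk_nvme_cmd cmd = {};
 : * int rc;
 : *
 : * cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 : * cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;    // CNS value
 : * rc = spdk_bdev_nvme_admin_passthru(desc, ch, &cmd, buf, sizeof(buf),
 : *                                    admin_done, NULL);
 : */
 :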
7107 : int
7108 1 : spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7109 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
7110 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7111 : {
7112 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7113 : struct spdk_bdev_io *bdev_io;
7114 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7115 :
7116 1 : if (!desc->write) {
7117 : /*
7118 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
7119 : * to easily determine if the command is a read or write, but for now just
7120 : * do not allow io_passthru with a read-only descriptor.
7121 : */
7122 0 : return -EBADF;
7123 : }
7124 :
7125 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
7126 1 : return -ENOTSUP;
7127 : }
7128 :
7129 0 : bdev_io = bdev_channel_get_io(channel);
7130 0 : if (!bdev_io) {
7131 0 : return -ENOMEM;
7132 : }
7133 :
7134 0 : bdev_io->internal.ch = channel;
7135 0 : bdev_io->internal.desc = desc;
7136 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
7137 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
7138 0 : bdev_io->u.nvme_passthru.buf = buf;
7139 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
7140 0 : bdev_io->u.nvme_passthru.md_buf = NULL;
7141 0 : bdev_io->u.nvme_passthru.md_len = 0;
7142 :
7143 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7144 :
7145 0 : bdev_io_submit(bdev_io);
7146 0 : return 0;
7147 1 : }
7148 :
7149 : int
7150 1 : spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7151 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
7152 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7153 : {
7154 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7155 : struct spdk_bdev_io *bdev_io;
7156 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7157 :
7158 1 : if (!desc->write) {
7159 : /*
7160 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
7161 : * to easily determine if the command is a read or write, but for now just
7162 : * do not allow io_passthru with a read-only descriptor.
7163 : */
7164 0 : return -EBADF;
7165 : }
7166 :
7167 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
7168 1 : return -ENOTSUP;
7169 : }
7170 :
7171 0 : bdev_io = bdev_channel_get_io(channel);
7172 0 : if (!bdev_io) {
7173 0 : return -ENOMEM;
7174 : }
7175 :
7176 0 : bdev_io->internal.ch = channel;
7177 0 : bdev_io->internal.desc = desc;
7178 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
7179 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
7180 0 : bdev_io->u.nvme_passthru.buf = buf;
7181 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
7182 0 : bdev_io->u.nvme_passthru.md_buf = md_buf;
7183 0 : bdev_io->u.nvme_passthru.md_len = md_len;
7184 :
7185 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7186 :
7187 0 : bdev_io_submit(bdev_io);
7188 0 : return 0;
7189 1 : }
7190 :
7191 : int
7192 0 : spdk_bdev_nvme_iov_passthru_md(struct spdk_bdev_desc *desc,
7193 : struct spdk_io_channel *ch,
7194 : const struct spdk_nvme_cmd *cmd,
7195 : struct iovec *iov, int iovcnt, size_t nbytes,
7196 : void *md_buf, size_t md_len,
7197 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7198 : {
7199 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7200 : struct spdk_bdev_io *bdev_io;
7201 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7202 :
7203 0 : if (!desc->write) {
7204 : /*
7205 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
7206 : * to easily determine if the command is a read or write, but for now just
7207 : * do not allow io_passthru with a read-only descriptor.
7208 : */
7209 0 : return -EBADF;
7210 : }
7211 :
7212 0 : if (md_buf && spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
7213 0 : return -ENOTSUP;
7214 0 : } else if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
7215 0 : return -ENOTSUP;
7216 : }
7217 :
7218 0 : bdev_io = bdev_channel_get_io(channel);
7219 0 : if (!bdev_io) {
7220 0 : return -ENOMEM;
7221 : }
7222 :
7223 0 : bdev_io->internal.ch = channel;
7224 0 : bdev_io->internal.desc = desc;
7225 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IOV_MD;
7226 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
7227 0 : bdev_io->u.nvme_passthru.iovs = iov;
7228 0 : bdev_io->u.nvme_passthru.iovcnt = iovcnt;
7229 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
7230 0 : bdev_io->u.nvme_passthru.md_buf = md_buf;
7231 0 : bdev_io->u.nvme_passthru.md_len = md_len;
7232 :
7233 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7234 :
7235 0 : bdev_io_submit(bdev_io);
7236 0 : return 0;
7237 0 : }
7238 :
7239 : static void bdev_abort_retry(void *ctx);
7240 : static void bdev_abort(struct spdk_bdev_io *parent_io);
7241 :
7242 : static void
7243 22 : bdev_abort_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
7244 : {
7245 22 : struct spdk_bdev_channel *channel = bdev_io->internal.ch;
7246 22 : struct spdk_bdev_io *parent_io = cb_arg;
7247 : struct spdk_bdev_io *bio_to_abort, *tmp_io;
7248 :
7249 22 : bio_to_abort = bdev_io->u.abort.bio_to_abort;
7250 :
7251 22 : spdk_bdev_free_io(bdev_io);
7252 :
7253 22 : if (!success) {
7254 : /* Check if the target I/O completed in the meantime. */
7255 2 : TAILQ_FOREACH(tmp_io, &channel->io_submitted, internal.ch_link) {
7256 1 : if (tmp_io == bio_to_abort) {
7257 0 : break;
7258 : }
7259 1 : }
7260 :
7261 : /* If the target I/O still exists, set the parent to failed. */
7262 1 : if (tmp_io != NULL) {
7263 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7264 0 : }
7265 1 : }
7266 :
7267 22 : assert(parent_io->internal.f.split);
7268 :
7269 22 : parent_io->internal.split.outstanding--;
7270 22 : if (parent_io->internal.split.outstanding == 0) {
7271 16 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7272 0 : bdev_abort_retry(parent_io);
7273 0 : } else {
7274 16 : bdev_io_complete(parent_io);
7275 : }
7276 16 : }
7277 22 : }
7278 :
7279 : static int
7280 23 : bdev_abort_io(struct spdk_bdev_desc *desc, struct spdk_bdev_channel *channel,
7281 : struct spdk_bdev_io *bio_to_abort,
7282 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7283 : {
7284 23 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7285 : struct spdk_bdev_io *bdev_io;
7286 :
7287 23 : if (bio_to_abort->type == SPDK_BDEV_IO_TYPE_ABORT ||
7288 23 : bio_to_abort->type == SPDK_BDEV_IO_TYPE_RESET) {
7289 : /* TODO: Abort reset or abort request. */
7290 0 : return -ENOTSUP;
7291 : }
7292 :
7293 23 : bdev_io = bdev_channel_get_io(channel);
7294 23 : if (bdev_io == NULL) {
7295 1 : return -ENOMEM;
7296 : }
7297 :
7298 22 : bdev_io->internal.ch = channel;
7299 22 : bdev_io->internal.desc = desc;
7300 22 : bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
7301 22 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7302 :
7303 22 : if (bio_to_abort->internal.f.split) {
7304 6 : assert(bdev_io_should_split(bio_to_abort));
7305 6 : bdev_io->u.bdev.abort.bio_cb_arg = bio_to_abort;
7306 :
7307 : /* Parent abort request is not submitted directly, but to manage its
7308 : * execution, add it to the submitted list here.
7309 : */
7310 6 : bdev_io->internal.submit_tsc = spdk_get_ticks();
7311 6 : bdev_ch_add_to_io_submitted(bdev_io);
7312 :
7313 6 : bdev_abort(bdev_io);
7314 :
7315 6 : return 0;
7316 : }
7317 :
7318 16 : bdev_io->u.abort.bio_to_abort = bio_to_abort;
7319 :
7320 : /* Submit the abort request to the underlying bdev module. */
7321 16 : bdev_io_submit(bdev_io);
7322 :
7323 16 : return 0;
7324 23 : }
7325 :
7326 : static bool
7327 46 : bdev_io_on_tailq(struct spdk_bdev_io *bdev_io, bdev_io_tailq_t *tailq)
7328 : {
7329 : struct spdk_bdev_io *iter;
7330 :
7331 46 : TAILQ_FOREACH(iter, tailq, internal.link) {
7332 0 : if (iter == bdev_io) {
7333 0 : return true;
7334 : }
7335 0 : }
7336 :
7337 46 : return false;
7338 46 : }
7339 :
7340 : static uint32_t
7341 18 : _bdev_abort(struct spdk_bdev_io *parent_io)
7342 : {
7343 18 : struct spdk_bdev_desc *desc = parent_io->internal.desc;
7344 18 : struct spdk_bdev_channel *channel = parent_io->internal.ch;
7345 : void *bio_cb_arg;
7346 : struct spdk_bdev_io *bio_to_abort;
7347 : uint32_t matched_ios;
7348 : int rc;
7349 :
7350 18 : bio_cb_arg = parent_io->u.bdev.abort.bio_cb_arg;
7351 :
7352 : /* matched_ios is returned and will be kept by the caller.
7353 : *
7354 : * This function will be used for two cases, 1) the same cb_arg is used for
7355 : * multiple I/Os, 2) a single large I/O is split into smaller ones.
7356 : * Incrementing split_outstanding directly here may confuse readers especially
7357 : * for the 1st case.
7358 : *
7359 : * Completion of I/O abort is processed after stack unwinding. Hence this trick
7360 : * works as expected.
7361 : */
7362 18 : matched_ios = 0;
7363 18 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
7364 :
7365 105 : TAILQ_FOREACH(bio_to_abort, &channel->io_submitted, internal.ch_link) {
7366 88 : if (bio_to_abort->internal.caller_ctx != bio_cb_arg) {
7367 65 : continue;
7368 : }
7369 :
7370 23 : if (bio_to_abort->internal.submit_tsc > parent_io->internal.submit_tsc) {
7371 : /* Any I/O which was submitted after this abort command should be excluded. */
7372 0 : continue;
7373 : }
7374 :
7375 : /* We can't abort a request that's being pushed/pulled or executed by accel */
7376 23 : if (bdev_io_on_tailq(bio_to_abort, &channel->io_accel_exec) ||
7377 23 : bdev_io_on_tailq(bio_to_abort, &channel->io_memory_domain)) {
7378 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7379 0 : break;
7380 : }
7381 :
7382 23 : rc = bdev_abort_io(desc, channel, bio_to_abort, bdev_abort_io_done, parent_io);
7383 23 : if (rc != 0) {
7384 1 : if (rc == -ENOMEM) {
7385 1 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
7386 1 : } else {
7387 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7388 : }
7389 1 : break;
7390 : }
7391 22 : matched_ios++;
7392 22 : }
7393 :
7394 18 : return matched_ios;
7395 : }
7396 :
7397 : static void
7398 1 : bdev_abort_retry(void *ctx)
7399 : {
7400 1 : struct spdk_bdev_io *parent_io = ctx;
7401 : uint32_t matched_ios;
7402 :
7403 1 : matched_ios = _bdev_abort(parent_io);
7404 :
7405 1 : if (matched_ios == 0) {
7406 0 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7407 0 : bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7408 0 : } else {
7409 : /* For a retry, finding no target I/O is a success because it means
7410 : * the target I/Os completed in the meantime.
7411 : */
7412 0 : bdev_io_complete(parent_io);
7413 : }
7414 0 : return;
7415 : }
7416 :
7417 : /* Use split_outstanding to manage the progress of aborting I/Os. */
7418 1 : parent_io->internal.f.split = true;
7419 1 : parent_io->internal.split.outstanding = matched_ios;
7420 1 : }
7421 :
7422 : static void
7423 17 : bdev_abort(struct spdk_bdev_io *parent_io)
7424 : {
7425 : uint32_t matched_ios;
7426 :
7427 17 : matched_ios = _bdev_abort(parent_io);
7428 :
7429 17 : if (matched_ios == 0) {
7430 2 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7431 1 : bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7432 1 : } else {
7433 : /* On the initial attempt, finding no target I/O is a failure. */
7434 1 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7435 1 : bdev_io_complete(parent_io);
7436 : }
7437 2 : return;
7438 : }
7439 :
7440 : /* Use split_outstanding to manage the progress of aborting I/Os. */
7441 15 : parent_io->internal.f.split = true;
7442 15 : parent_io->internal.split.outstanding = matched_ios;
7443 17 : }
7444 :
7445 : int
7446 12 : spdk_bdev_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7447 : void *bio_cb_arg,
7448 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7449 : {
7450 12 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7451 12 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7452 : struct spdk_bdev_io *bdev_io;
7453 :
7454 12 : if (bio_cb_arg == NULL) {
7455 0 : return -EINVAL;
7456 : }
7457 :
7458 12 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
7459 1 : return -ENOTSUP;
7460 : }
7461 :
7462 11 : bdev_io = bdev_channel_get_io(channel);
7463 11 : if (bdev_io == NULL) {
7464 0 : return -ENOMEM;
7465 : }
7466 :
7467 11 : bdev_io->internal.ch = channel;
7468 11 : bdev_io->internal.desc = desc;
7469 11 : bdev_io->internal.submit_tsc = spdk_get_ticks();
7470 11 : bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
7471 11 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7472 :
7473 11 : bdev_io->u.bdev.abort.bio_cb_arg = bio_cb_arg;
7474 :
7475 : /* Parent abort request is not submitted directly, but to manage its execution,
7476 : * add it to the submitted list here.
7477 : */
7478 11 : bdev_ch_add_to_io_submitted(bdev_io);
7479 :
7480 11 : bdev_abort(bdev_io);
7481 :
7482 11 : return 0;
7483 12 : }
7484 :
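 : /*
 : * Usage sketch: abort, by completion cb_arg, every matching I/O still
 : * outstanding on this channel. I/Os submitted after the abort command
 : * are excluded, and -ENOMEM from a child abort is retried internally.
 : * io_ctx and abort_done are illustrative assumptions.
 : *
 : * rc = spdk_bdev_abort(desc, ch, io_ctx, abort_done, NULL);
 : * if (rc == -ENOTSUP) {
 : *         // The module does not implement SPDK_BDEV_IO_TYPE_ABORT.
 : * }
 : */
 :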
7485 : int
7486 4 : spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
7487 : struct spdk_bdev_io_wait_entry *entry)
7488 : {
7489 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7490 4 : struct spdk_bdev_mgmt_channel *mgmt_ch = channel->shared_resource->mgmt_ch;
7491 :
7492 4 : if (bdev != entry->bdev) {
7493 0 : SPDK_ERRLOG("bdevs do not match\n");
7494 0 : return -EINVAL;
7495 : }
7496 :
7497 4 : if (mgmt_ch->per_thread_cache_count > 0) {
7498 0 : SPDK_ERRLOG("Cannot queue io_wait if spdk_bdev_io available in per-thread cache\n");
7499 0 : return -EINVAL;
7500 : }
7501 :
7502 4 : TAILQ_INSERT_TAIL(&mgmt_ch->io_wait_queue, entry, link);
7503 4 : return 0;
7504 4 : }
7505 :
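 : /*
 : * Usage sketch: the canonical -ENOMEM retry pattern for the API above.
 : * When a submission fails because the spdk_bdev_io pool is exhausted,
 : * register a wait entry and resubmit from its callback. The my_request
 : * wrapper and the retry_read/submit_read names are illustrative
 : * assumptions.
 : *
 : * struct my_request {
 : *         struct spdk_bdev_io_wait_entry bdev_io_wait;
 : *         struct spdk_bdev_desc *desc;
 : *         struct spdk_io_channel *ch;
 : * };
 : *
 : * static void
 : * retry_read(void *cb_arg)
 : * {
 : *         struct my_request *req = cb_arg;
 : *
 : *         submit_read(req);    // wrapper that calls spdk_bdev_read_blocks()
 : * }
 : *
 : * // In the submission path:
 : * if (rc == -ENOMEM) {
 : *         req->bdev_io_wait.bdev = bdev;
 : *         req->bdev_io_wait.cb_fn = retry_read;
 : *         req->bdev_io_wait.cb_arg = req;
 : *         spdk_bdev_queue_io_wait(bdev, req->ch, &req->bdev_io_wait);
 : * }
 : */
 :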
7506 : static inline void
7507 612 : bdev_io_update_io_stat(struct spdk_bdev_io *bdev_io, uint64_t tsc_diff)
7508 : {
7509 612 : enum spdk_bdev_io_status io_status = bdev_io->internal.status;
7510 612 : struct spdk_bdev_io_stat *io_stat = bdev_io->internal.ch->stat;
7511 612 : uint64_t num_blocks = bdev_io->u.bdev.num_blocks;
7512 612 : uint32_t blocklen = bdev_io->bdev->blocklen;
7513 :
7514 612 : if (spdk_likely(io_status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7515 519 : switch (bdev_io->type) {
7516 : case SPDK_BDEV_IO_TYPE_READ:
7517 321 : io_stat->bytes_read += num_blocks * blocklen;
7518 321 : io_stat->num_read_ops++;
7519 321 : io_stat->read_latency_ticks += tsc_diff;
7520 321 : if (io_stat->max_read_latency_ticks < tsc_diff) {
7521 7 : io_stat->max_read_latency_ticks = tsc_diff;
7522 7 : }
7523 321 : if (io_stat->min_read_latency_ticks > tsc_diff) {
7524 42 : io_stat->min_read_latency_ticks = tsc_diff;
7525 42 : }
7526 321 : break;
7527 : case SPDK_BDEV_IO_TYPE_WRITE:
7528 75 : io_stat->bytes_written += num_blocks * blocklen;
7529 75 : io_stat->num_write_ops++;
7530 75 : io_stat->write_latency_ticks += tsc_diff;
7531 75 : if (io_stat->max_write_latency_ticks < tsc_diff) {
7532 4 : io_stat->max_write_latency_ticks = tsc_diff;
7533 4 : }
7534 75 : if (io_stat->min_write_latency_ticks > tsc_diff) {
7535 25 : io_stat->min_write_latency_ticks = tsc_diff;
7536 25 : }
7537 75 : break;
7538 : case SPDK_BDEV_IO_TYPE_UNMAP:
7539 20 : io_stat->bytes_unmapped += num_blocks * blocklen;
7540 20 : io_stat->num_unmap_ops++;
7541 20 : io_stat->unmap_latency_ticks += tsc_diff;
7542 20 : if (io_stat->max_unmap_latency_ticks < tsc_diff) {
7543 0 : io_stat->max_unmap_latency_ticks = tsc_diff;
7544 0 : }
7545 20 : if (io_stat->min_unmap_latency_ticks > tsc_diff) {
7546 3 : io_stat->min_unmap_latency_ticks = tsc_diff;
7547 3 : }
7548 20 : break;
7549 : case SPDK_BDEV_IO_TYPE_ZCOPY:
7550 : /* Track the data in the start phase only */
7551 4 : if (bdev_io->u.bdev.zcopy.start) {
7552 2 : if (bdev_io->u.bdev.zcopy.populate) {
7553 1 : io_stat->bytes_read += num_blocks * blocklen;
7554 1 : io_stat->num_read_ops++;
7555 1 : io_stat->read_latency_ticks += tsc_diff;
7556 1 : if (io_stat->max_read_latency_ticks < tsc_diff) {
7557 0 : io_stat->max_read_latency_ticks = tsc_diff;
7558 0 : }
7559 1 : if (io_stat->min_read_latency_ticks > tsc_diff) {
7560 1 : io_stat->min_read_latency_ticks = tsc_diff;
7561 1 : }
7562 1 : } else {
7563 1 : io_stat->bytes_written += num_blocks * blocklen;
7564 1 : io_stat->num_write_ops++;
7565 1 : io_stat->write_latency_ticks += tsc_diff;
7566 1 : if (io_stat->max_write_latency_ticks < tsc_diff) {
7567 0 : io_stat->max_write_latency_ticks = tsc_diff;
7568 0 : }
7569 1 : if (io_stat->min_write_latency_ticks > tsc_diff) {
7570 1 : io_stat->min_write_latency_ticks = tsc_diff;
7571 1 : }
7572 : }
7573 2 : }
7574 4 : break;
7575 : case SPDK_BDEV_IO_TYPE_COPY:
7576 21 : io_stat->bytes_copied += num_blocks * blocklen;
7577 21 : io_stat->num_copy_ops++;
7578 21 : io_stat->copy_latency_ticks += tsc_diff;
7579 21 : if (io_stat->max_copy_latency_ticks < tsc_diff) {
7580 0 : io_stat->max_copy_latency_ticks = tsc_diff;
7581 0 : }
7582 21 : if (io_stat->min_copy_latency_ticks > tsc_diff) {
7583 4 : io_stat->min_copy_latency_ticks = tsc_diff;
7584 4 : }
7585 21 : break;
7586 : default:
7587 78 : break;
7588 : }
7589 612 : } else if (io_status <= SPDK_BDEV_IO_STATUS_FAILED && io_status >= SPDK_MIN_BDEV_IO_STATUS) {
7590 93 : io_stat = bdev_io->bdev->internal.stat;
7591 93 : assert(io_stat->io_error != NULL);
7592 :
7593 93 : spdk_spin_lock(&bdev_io->bdev->internal.spinlock);
7594 93 : io_stat->io_error->error_status[-io_status - 1]++;
7595 93 : spdk_spin_unlock(&bdev_io->bdev->internal.spinlock);
7596 93 : }
7597 :
7598 : #ifdef SPDK_CONFIG_VTUNE
7599 : uint64_t now_tsc = spdk_get_ticks();
7600 : if (now_tsc > (bdev_io->internal.ch->start_tsc + bdev_io->internal.ch->interval_tsc)) {
7601 : uint64_t data[5];
7602 : struct spdk_bdev_io_stat *prev_stat = bdev_io->internal.ch->prev_stat;
7603 :
7604 : data[0] = io_stat->num_read_ops - prev_stat->num_read_ops;
7605 : data[1] = io_stat->bytes_read - prev_stat->bytes_read;
7606 : data[2] = io_stat->num_write_ops - prev_stat->num_write_ops;
7607 : data[3] = io_stat->bytes_written - prev_stat->bytes_written;
7608 : data[4] = bdev_io->bdev->fn_table->get_spin_time ?
7609 : bdev_io->bdev->fn_table->get_spin_time(spdk_bdev_io_get_io_channel(bdev_io)) : 0;
7610 :
7611 : __itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_io->internal.ch->handle,
7612 : __itt_metadata_u64, 5, data);
7613 :
7614 : memcpy(prev_stat, io_stat, sizeof(struct spdk_bdev_io_stat));
7615 : bdev_io->internal.ch->start_tsc = now_tsc;
7616 : }
7617 : #endif
7618 612 : }
7619 :
7620 : static inline void
7621 612 : _bdev_io_complete(void *ctx)
7622 : {
7623 612 : struct spdk_bdev_io *bdev_io = ctx;
7624 :
7625 612 : if (spdk_unlikely(bdev_io_use_accel_sequence(bdev_io))) {
7626 0 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7627 0 : spdk_accel_sequence_abort(bdev_io->internal.accel_sequence);
7628 0 : }
7629 :
7630 612 : assert(bdev_io->internal.cb != NULL);
7631 612 : assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
7632 :
7633 1224 : bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
7634 612 : bdev_io->internal.caller_ctx);
7635 612 : }
7636 :
7637 : static inline void
7638 620 : bdev_io_complete(void *ctx)
7639 : {
7640 620 : struct spdk_bdev_io *bdev_io = ctx;
7641 620 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7642 : uint64_t tsc, tsc_diff;
7643 :
7644 620 : if (spdk_unlikely(bdev_io->internal.f.in_submit_request)) {
7645 : /*
7646 : * Defer completion to avoid potential infinite recursion if the
7647 : * user's completion callback issues a new I/O.
7648 : */
7649 16 : spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7650 8 : bdev_io_complete, bdev_io);
7651 8 : return;
7652 : }
7653 :
7654 612 : tsc = spdk_get_ticks();
7655 612 : tsc_diff = tsc - bdev_io->internal.submit_tsc;
7656 :
7657 612 : bdev_ch_remove_from_io_submitted(bdev_io);
7658 612 : spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, bdev_ch->trace_id, 0, (uintptr_t)bdev_io,
7659 : bdev_io->internal.caller_ctx, bdev_ch->queue_depth);
7660 :
7661 612 : if (bdev_ch->histogram) {
7662 4 : if (bdev_io->bdev->internal.histogram_io_type == 0 ||
7663 0 : bdev_io->bdev->internal.histogram_io_type == bdev_io->type) {
7664 : /*
7665 : * Tally all I/O types if the histogram_io_type is set to 0.
7666 : */
7667 4 : spdk_histogram_data_tally(bdev_ch->histogram, tsc_diff);
7668 4 : }
7669 4 : }
7670 :
7671 612 : bdev_io_update_io_stat(bdev_io, tsc_diff);
7672 612 : _bdev_io_complete(bdev_io);
7673 620 : }
7674 :
7675 : /* The difference between this function and bdev_io_complete() is that this should be called to
7676 : * complete IOs that haven't been submitted via bdev_io_submit(), as they weren't added onto the
7677 : * io_submitted list and don't have submit_tsc updated.
7678 : */
7679 : static inline void
7680 0 : bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io)
7681 : {
7682 : /* Since the IO hasn't been submitted it's bound to be failed */
7683 0 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7684 :
7685 : /* At this point we don't know whether the IO is being completed from the submission
7686 : * context or not, but since this is an error path, we can always do an spdk_thread_send_msg(). */
7687 0 : spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7688 0 : _bdev_io_complete, bdev_io);
7689 0 : }
7690 :
7691 : static void bdev_destroy_cb(void *io_device);
7692 :
7693 : static inline void
7694 18 : _bdev_reset_complete(void *ctx)
7695 : {
7696 18 : struct spdk_bdev_io *bdev_io = ctx;
7697 :
7698 : /* Put the channel reference we got in submission. */
7699 18 : assert(bdev_io->u.reset.ch_ref != NULL);
7700 18 : spdk_put_io_channel(bdev_io->u.reset.ch_ref);
7701 18 : bdev_io->u.reset.ch_ref = NULL;
7702 :
7703 18 : bdev_io_complete(bdev_io);
7704 18 : }
7705 :
7706 : static void
7707 16 : bdev_reset_complete(struct spdk_bdev *bdev, void *_ctx, int status)
7708 : {
7709 16 : struct spdk_bdev_io *bdev_io = _ctx;
7710 : bdev_io_tailq_t queued_resets;
7711 : struct spdk_bdev_io *queued_reset;
7712 :
7713 16 : assert(bdev_io == bdev->internal.reset_in_progress);
7714 :
7715 16 : TAILQ_INIT(&queued_resets);
7716 :
7717 16 : spdk_spin_lock(&bdev->internal.spinlock);
7718 16 : TAILQ_SWAP(&bdev->internal.queued_resets, &queued_resets,
7719 : spdk_bdev_io, internal.link);
7720 16 : bdev->internal.reset_in_progress = NULL;
7721 16 : spdk_spin_unlock(&bdev->internal.spinlock);
7722 :
7723 18 : while (!TAILQ_EMPTY(&queued_resets)) {
7724 2 : queued_reset = TAILQ_FIRST(&queued_resets);
7725 2 : TAILQ_REMOVE(&queued_resets, queued_reset, internal.link);
7726 2 : queued_reset->internal.status = bdev_io->internal.status;
7727 4 : spdk_thread_send_msg(spdk_bdev_io_get_thread(queued_reset),
7728 2 : _bdev_reset_complete, queued_reset);
7729 : }
7730 :
7731 16 : _bdev_reset_complete(bdev_io);
7732 :
7733 16 : if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING &&
7734 1 : TAILQ_EMPTY(&bdev->internal.open_descs)) {
7735 1 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
7736 1 : }
7737 16 : }
7738 :
7739 : static void
7740 20 : bdev_unfreeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
7741 : struct spdk_io_channel *_ch, void *_ctx)
7742 : {
7743 20 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
7744 :
7745 20 : ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
7746 :
7747 20 : spdk_bdev_for_each_channel_continue(i, 0);
7748 20 : }
7749 :
7750 : static void
7751 0 : bdev_io_complete_sequence_cb(void *ctx, int status)
7752 : {
7753 0 : struct spdk_bdev_io *bdev_io = ctx;
7754 :
7755 : /* u.bdev.accel_sequence should have already been cleared at this point */
7756 0 : assert(bdev_io->u.bdev.accel_sequence == NULL);
7757 0 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7758 0 : bdev_io->internal.f.has_accel_sequence = false;
7759 :
7760 0 : if (spdk_unlikely(status != 0)) {
7761 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
7762 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7763 0 : }
7764 :
7765 0 : bdev_io_complete(bdev_io);
7766 0 : }
7767 :
7768 : void
7769 598 : spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
7770 : {
7771 598 : struct spdk_bdev *bdev = bdev_io->bdev;
7772 598 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7773 598 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
7774 :
7775 598 : if (spdk_unlikely(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING)) {
7776 0 : SPDK_ERRLOG("Unexpected completion on IO from %s module, status was %s\n",
7777 : spdk_bdev_get_module_name(bdev),
7778 : bdev_io_status_get_string(bdev_io->internal.status));
7779 0 : assert(false);
7780 : }
7781 598 : bdev_io->internal.status = status;
7782 :
7783 598 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
7784 16 : assert(bdev_io == bdev->internal.reset_in_progress);
7785 16 : spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io,
7786 : bdev_reset_complete);
7787 16 : return;
7788 : } else {
7789 582 : bdev_io_decrement_outstanding(bdev_ch, shared_resource);
7790 582 : if (spdk_likely(status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7791 485 : if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
7792 0 : bdev_io_exec_sequence(bdev_io, bdev_io_complete_sequence_cb);
7793 0 : return;
7794 485 : } else if (spdk_unlikely(bdev_io->internal.f.has_bounce_buf &&
7795 : !bdev_io_use_accel_sequence(bdev_io))) {
7796 26 : _bdev_io_push_bounce_data_buffer(bdev_io,
7797 : _bdev_io_complete_push_bounce_done);
7798 : /* bdev IO will be completed in the callback */
7799 26 : return;
7800 : }
7801 459 : }
7802 :
7803 556 : if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io, BDEV_IO_RETRY_STATE_SUBMIT))) {
7804 5 : return;
7805 : }
7806 : }
7807 :
7808 551 : bdev_io_complete(bdev_io);
7809 598 : }
7810 :
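 : /*
 : * Usage sketch: a bdev module calls the completion function above exactly
 : * once per I/O, from the thread that submitted it, with a status other
 : * than PENDING. The my_bdev_request_done wrapper is an illustrative
 : * assumption.
 : *
 : * static void
 : * my_bdev_request_done(void *ctx, int rc)
 : * {
 : *         struct spdk_bdev_io *bdev_io = ctx;
 : *
 : *         spdk_bdev_io_complete(bdev_io, rc == 0 ?
 : *                               SPDK_BDEV_IO_STATUS_SUCCESS :
 : *                               SPDK_BDEV_IO_STATUS_FAILED);
 : * }
 : */
 :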
7811 : void
7812 0 : spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
7813 : enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
7814 : {
7815 : enum spdk_bdev_io_status status;
7816 :
7817 0 : if (sc == SPDK_SCSI_STATUS_GOOD) {
7818 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7819 0 : } else {
7820 0 : status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
7821 0 : bdev_io->internal.error.scsi.sc = sc;
7822 0 : bdev_io->internal.error.scsi.sk = sk;
7823 0 : bdev_io->internal.error.scsi.asc = asc;
7824 0 : bdev_io->internal.error.scsi.ascq = ascq;
7825 : }
7826 :
7827 0 : spdk_bdev_io_complete(bdev_io, status);
7828 0 : }
7829 :
7830 : void
7831 0 : spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
7832 : int *sc, int *sk, int *asc, int *ascq)
7833 : {
7834 0 : assert(sc != NULL);
7835 0 : assert(sk != NULL);
7836 0 : assert(asc != NULL);
7837 0 : assert(ascq != NULL);
7838 :
7839 0 : switch (bdev_io->internal.status) {
7840 : case SPDK_BDEV_IO_STATUS_SUCCESS:
7841 0 : *sc = SPDK_SCSI_STATUS_GOOD;
7842 0 : *sk = SPDK_SCSI_SENSE_NO_SENSE;
7843 0 : *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7844 0 : *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7845 0 : break;
7846 : case SPDK_BDEV_IO_STATUS_NVME_ERROR:
7847 0 : spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
7848 0 : break;
7849 : case SPDK_BDEV_IO_STATUS_MISCOMPARE:
7850 0 : *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7851 0 : *sk = SPDK_SCSI_SENSE_MISCOMPARE;
7852 0 : *asc = SPDK_SCSI_ASC_MISCOMPARE_DURING_VERIFY_OPERATION;
7853 0 : *ascq = bdev_io->internal.error.scsi.ascq;
7854 0 : break;
7855 : case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
7856 0 : *sc = bdev_io->internal.error.scsi.sc;
7857 0 : *sk = bdev_io->internal.error.scsi.sk;
7858 0 : *asc = bdev_io->internal.error.scsi.asc;
7859 0 : *ascq = bdev_io->internal.error.scsi.ascq;
7860 0 : break;
7861 : default:
7862 0 : *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7863 0 : *sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
7864 0 : *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7865 0 : *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7866 0 : break;
7867 : }
7868 0 : }
7869 :
7870 : void
7871 0 : spdk_bdev_io_complete_aio_status(struct spdk_bdev_io *bdev_io, int aio_result)
7872 : {
7873 : enum spdk_bdev_io_status status;
7874 :
7875 0 : if (aio_result == 0) {
7876 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7877 0 : } else {
7878 0 : status = SPDK_BDEV_IO_STATUS_AIO_ERROR;
7879 : }
7880 :
7881 0 : bdev_io->internal.error.aio_result = aio_result;
7882 :
7883 0 : spdk_bdev_io_complete(bdev_io, status);
7884 0 : }
7885 :
7886 : void
7887 0 : spdk_bdev_io_get_aio_status(const struct spdk_bdev_io *bdev_io, int *aio_result)
7888 : {
7889 0 : assert(aio_result != NULL);
7890 :
7891 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_AIO_ERROR) {
7892 0 : *aio_result = bdev_io->internal.error.aio_result;
7893 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7894 0 : *aio_result = 0;
7895 0 : } else {
7896 0 : *aio_result = -EIO;
7897 : }
7898 0 : }
7899 :
7900 : void
7901 0 : spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
7902 : {
7903 : enum spdk_bdev_io_status status;
7904 :
7905 0 : if (spdk_likely(sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS)) {
7906 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7907 0 : } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
7908 0 : status = SPDK_BDEV_IO_STATUS_ABORTED;
7909 0 : } else {
7910 0 : status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
7911 : }
7912 :
7913 0 : bdev_io->internal.error.nvme.cdw0 = cdw0;
7914 0 : bdev_io->internal.error.nvme.sct = sct;
7915 0 : bdev_io->internal.error.nvme.sc = sc;
7916 :
7917 0 : spdk_bdev_io_complete(bdev_io, status);
7918 0 : }
7919 :
7920 : void
7921 0 : spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
7922 : {
7923 0 : assert(sct != NULL);
7924 0 : assert(sc != NULL);
7925 0 : assert(cdw0 != NULL);
7926 :
7927 0 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
7928 0 : *sct = SPDK_NVME_SCT_GENERIC;
7929 0 : *sc = SPDK_NVME_SC_SUCCESS;
7930 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7931 0 : *cdw0 = 0;
7932 0 : } else {
7933 0 : *cdw0 = 1U;
7934 : }
7935 0 : return;
7936 : }
7937 :
7938 0 : if (spdk_likely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7939 0 : *sct = SPDK_NVME_SCT_GENERIC;
7940 0 : *sc = SPDK_NVME_SC_SUCCESS;
7941 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7942 0 : *sct = bdev_io->internal.error.nvme.sct;
7943 0 : *sc = bdev_io->internal.error.nvme.sc;
7944 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7945 0 : *sct = SPDK_NVME_SCT_GENERIC;
7946 0 : *sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7947 0 : } else {
7948 0 : *sct = SPDK_NVME_SCT_GENERIC;
7949 0 : *sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7950 : }
7951 :
7952 0 : *cdw0 = bdev_io->internal.error.nvme.cdw0;
7953 0 : }
7954 :
7955 : void
7956 0 : spdk_bdev_io_get_nvme_fused_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
7957 : int *first_sct, int *first_sc, int *second_sct, int *second_sc)
7958 : {
7959 0 : assert(first_sct != NULL);
7960 0 : assert(first_sc != NULL);
7961 0 : assert(second_sct != NULL);
7962 0 : assert(second_sc != NULL);
7963 0 : assert(cdw0 != NULL);
7964 :
7965 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7966 0 : if (bdev_io->internal.error.nvme.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
7967 0 : bdev_io->internal.error.nvme.sc == SPDK_NVME_SC_COMPARE_FAILURE) {
7968 0 : *first_sct = bdev_io->internal.error.nvme.sct;
7969 0 : *first_sc = bdev_io->internal.error.nvme.sc;
7970 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7971 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7972 0 : } else {
7973 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7974 0 : *first_sc = SPDK_NVME_SC_SUCCESS;
7975 0 : *second_sct = bdev_io->internal.error.nvme.sct;
7976 0 : *second_sc = bdev_io->internal.error.nvme.sc;
7977 : }
7978 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7979 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7980 0 : *first_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7981 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7982 0 : *second_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7983 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7984 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7985 0 : *first_sc = SPDK_NVME_SC_SUCCESS;
7986 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7987 0 : *second_sc = SPDK_NVME_SC_SUCCESS;
7988 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED) {
7989 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7990 0 : *first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7991 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7992 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7993 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_MISCOMPARE) {
7994 0 : *first_sct = SPDK_NVME_SCT_MEDIA_ERROR;
7995 0 : *first_sc = SPDK_NVME_SC_COMPARE_FAILURE;
7996 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7997 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7998 0 : } else {
7999 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
8000 0 : *first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
8001 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
8002 0 : *second_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
8003 : }
8004 :
8005 0 : *cdw0 = bdev_io->internal.error.nvme.cdw0;
8006 0 : }
8007 :
8008 : void
8009 0 : spdk_bdev_io_complete_base_io_status(struct spdk_bdev_io *bdev_io,
8010 : const struct spdk_bdev_io *base_io)
8011 : {
8012 0 : switch (base_io->internal.status) {
8013 : case SPDK_BDEV_IO_STATUS_NVME_ERROR:
8014 0 : spdk_bdev_io_complete_nvme_status(bdev_io,
8015 0 : base_io->internal.error.nvme.cdw0,
8016 0 : base_io->internal.error.nvme.sct,
8017 0 : base_io->internal.error.nvme.sc);
8018 0 : break;
8019 : case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
8020 0 : spdk_bdev_io_complete_scsi_status(bdev_io,
8021 0 : base_io->internal.error.scsi.sc,
8022 0 : base_io->internal.error.scsi.sk,
8023 0 : base_io->internal.error.scsi.asc,
8024 0 : base_io->internal.error.scsi.ascq);
8025 0 : break;
8026 : case SPDK_BDEV_IO_STATUS_AIO_ERROR:
8027 0 : spdk_bdev_io_complete_aio_status(bdev_io, base_io->internal.error.aio_result);
8028 0 : break;
8029 : default:
8030 0 : spdk_bdev_io_complete(bdev_io, base_io->internal.status);
8031 0 : break;
8032 : }
8033 0 : }
8034 :
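 : /*
 : * spdk_bdev_io_complete_base_io_status() lets a stacked (virtual) bdev
 : * propagate a base bdev's completion verbatim, preserving NVMe, SCSI and
 : * AIO error details instead of flattening them to a generic status. A
 : * minimal sketch, assuming a hypothetical passthru completion callback:
 : *
 : *   static void
 : *   passthru_base_done(struct spdk_bdev_io *base_io, bool success, void *cb_arg)
 : *   {
 : *           struct spdk_bdev_io *parent_io = cb_arg;
 : *
 : *           spdk_bdev_io_complete_base_io_status(parent_io, base_io);
 : *           spdk_bdev_free_io(base_io);
 : *   }
 : */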
8035 : struct spdk_thread *
8036 664 : spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
8037 : {
8038 664 : return spdk_io_channel_get_thread(bdev_io->internal.ch->channel);
8039 : }
8040 :
8041 : struct spdk_io_channel *
8042 70 : spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
8043 : {
8044 70 : return bdev_io->internal.ch->channel;
8045 : }
8046 :
8047 : static int
8048 132 : bdev_register(struct spdk_bdev *bdev)
8049 : {
8050 : char *bdev_name;
8051 : char uuid[SPDK_UUID_STRING_LEN];
8052 : struct spdk_iobuf_opts iobuf_opts;
8053 : int ret;
8054 :
8055 132 : assert(bdev->module != NULL);
8056 :
8057 132 : if (!bdev->name) {
8058 0 : SPDK_ERRLOG("Bdev name is NULL\n");
8059 0 : return -EINVAL;
8060 : }
8061 :
8062 132 : if (!strlen(bdev->name)) {
8063 0 : SPDK_ERRLOG("Bdev name must not be an empty string\n");
8064 0 : return -EINVAL;
8065 : }
8066 :
8067 : /* Users often register their own I/O devices using the bdev name. In
8068 : * order to avoid conflicts, prepend bdev_. */
8069 132 : bdev_name = spdk_sprintf_alloc("bdev_%s", bdev->name);
8070 132 : if (!bdev_name) {
8071 0 : SPDK_ERRLOG("Unable to allocate memory for internal bdev name.\n");
8072 0 : return -ENOMEM;
8073 : }
8074 :
8075 132 : bdev->internal.stat = bdev_alloc_io_stat(true);
8076 132 : if (!bdev->internal.stat) {
8077 0 : SPDK_ERRLOG("Unable to allocate I/O statistics structure.\n");
8078 0 : free(bdev_name);
8079 0 : return -ENOMEM;
8080 : }
8081 :
8082 132 : bdev->internal.status = SPDK_BDEV_STATUS_READY;
8083 132 : bdev->internal.measured_queue_depth = UINT64_MAX;
8084 132 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
8085 132 : memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
8086 132 : bdev->internal.qd_poller = NULL;
8087 132 : bdev->internal.qos = NULL;
8088 :
8089 132 : TAILQ_INIT(&bdev->internal.open_descs);
8090 132 : TAILQ_INIT(&bdev->internal.locked_ranges);
8091 132 : TAILQ_INIT(&bdev->internal.pending_locked_ranges);
8092 132 : TAILQ_INIT(&bdev->internal.queued_resets);
8093 132 : TAILQ_INIT(&bdev->aliases);
8094 :
8095 : /* The UUID may be specified by the user or set by the bdev module itself.
8096 : * Otherwise it is generated here, so this field is never empty. */
8097 132 : if (spdk_uuid_is_null(&bdev->uuid)) {
8098 43 : spdk_uuid_generate(&bdev->uuid);
8099 43 : }
8100 :
8101 : /* Add the UUID alias only if it's different than the name */
8102 132 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
8103 132 : if (strcmp(bdev->name, uuid) != 0) {
8104 131 : ret = spdk_bdev_alias_add(bdev, uuid);
8105 131 : if (ret != 0) {
8106 2 : SPDK_ERRLOG("Unable to add uuid:%s alias for bdev %s\n", uuid, bdev->name);
8107 2 : bdev_free_io_stat(bdev->internal.stat);
8108 2 : free(bdev_name);
8109 2 : return ret;
8110 : }
8111 129 : }
8112 :
8113 130 : spdk_iobuf_get_opts(&iobuf_opts, sizeof(iobuf_opts));
8114 130 : if (spdk_bdev_get_buf_align(bdev) > 1) {
8115 0 : bdev->max_rw_size = spdk_min(bdev->max_rw_size ? bdev->max_rw_size : UINT32_MAX,
8116 : iobuf_opts.large_bufsize / bdev->blocklen);
8117 0 : }
8118 :
8119 : /* If the user didn't specify a write unit size, set it to one. */
8120 130 : if (bdev->write_unit_size == 0) {
8121 126 : bdev->write_unit_size = 1;
8122 126 : }
8123 :
8124 : /* If the bdev module did not set ACWU (i.e., it does not support it natively), default it to the write unit size. */
8125 130 : if (bdev->acwu == 0) {
8126 126 : bdev->acwu = bdev->write_unit_size;
8127 126 : }
8128 :
8129 130 : if (bdev->phys_blocklen == 0) {
8130 126 : bdev->phys_blocklen = spdk_bdev_get_data_block_size(bdev);
8131 126 : }
8132 :
8133 130 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
8134 0 : bdev->max_copy = bdev_get_max_write(bdev, iobuf_opts.large_bufsize);
8135 0 : }
8136 :
8137 130 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
8138 0 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
8139 0 : }
8140 :
8141 130 : bdev->internal.reset_in_progress = NULL;
8142 130 : bdev->internal.qd_poll_in_progress = false;
8143 130 : bdev->internal.period = 0;
8144 130 : bdev->internal.new_period = 0;
8145 130 : bdev->internal.trace_id = spdk_trace_register_owner(OWNER_TYPE_BDEV, bdev_name);
8146 :
8147 : /*
8148 : * Initialize the spinlock before registering the I/O device, because the
8149 : * spinlock is used in bdev_channel_create().
8150 : */
8151 130 : spdk_spin_init(&bdev->internal.spinlock);
8152 :
8153 260 : spdk_io_device_register(__bdev_to_io_dev(bdev),
8154 : bdev_channel_create, bdev_channel_destroy,
8155 : sizeof(struct spdk_bdev_channel),
8156 130 : bdev_name);
8157 :
8158 : /*
8159 : * Register bdev name only after the bdev object is ready.
8160 : * After bdev_name_add returns, it is possible for other threads to start using the bdev,
8161 : * create IO channels...
8162 : */
8163 130 : ret = bdev_name_add(&bdev->internal.bdev_name, bdev, bdev->name);
8164 130 : if (ret != 0) {
8165 0 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), NULL);
8166 0 : bdev_free_io_stat(bdev->internal.stat);
8167 0 : spdk_spin_destroy(&bdev->internal.spinlock);
8168 0 : free(bdev_name);
8169 0 : return ret;
8170 : }
8171 :
8172 130 : free(bdev_name);
8173 :
8174 130 : SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
8175 130 : TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
8176 :
8177 130 : return 0;
8178 132 : }
8179 :
8180 : static void
8181 131 : bdev_destroy_cb(void *io_device)
8182 : {
8183 : int rc;
8184 : struct spdk_bdev *bdev;
8185 : spdk_bdev_unregister_cb cb_fn;
8186 : void *cb_arg;
8187 :
8188 131 : bdev = __bdev_from_io_dev(io_device);
8189 :
8190 131 : if (bdev->internal.unregister_td != spdk_get_thread()) {
8191 1 : spdk_thread_send_msg(bdev->internal.unregister_td, bdev_destroy_cb, io_device);
8192 1 : return;
8193 : }
8194 :
8195 130 : cb_fn = bdev->internal.unregister_cb;
8196 130 : cb_arg = bdev->internal.unregister_ctx;
8197 :
8198 130 : spdk_spin_destroy(&bdev->internal.spinlock);
8199 130 : free(bdev->internal.qos);
8200 130 : bdev_free_io_stat(bdev->internal.stat);
8201 130 : spdk_trace_unregister_owner(bdev->internal.trace_id);
8202 :
8203 130 : rc = bdev->fn_table->destruct(bdev->ctxt);
8204 130 : if (rc < 0) {
8205 0 : SPDK_ERRLOG("destruct failed\n");
8206 0 : }
8207 130 : if (rc <= 0 && cb_fn != NULL) {
8208 10 : cb_fn(cb_arg, rc);
8209 10 : }
8210 131 : }
8211 :
8212 : void
8213 2 : spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
8214 : {
8215 2 : if (bdev->internal.unregister_cb != NULL) {
8216 0 : bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
8217 0 : }
8218 2 : }
8219 :
8220 : static void
8221 19 : _remove_notify(void *arg)
8222 : {
8223 19 : struct spdk_bdev_desc *desc = arg;
8224 :
8225 19 : _event_notify(desc, SPDK_BDEV_EVENT_REMOVE);
8226 19 : }
8227 :
8228 : /* returns: 0 - bdev removed and ready to be destructed.
8229 : * -EBUSY - bdev can't be destructed yet. */
8230 : static int
8231 145 : bdev_unregister_unsafe(struct spdk_bdev *bdev)
8232 : {
8233 : struct spdk_bdev_desc *desc, *tmp;
8234 : struct spdk_bdev_alias *alias;
8235 145 : int rc = 0;
8236 : char uuid[SPDK_UUID_STRING_LEN];
8237 :
8238 145 : assert(spdk_spin_held(&g_bdev_mgr.spinlock));
8239 145 : assert(spdk_spin_held(&bdev->internal.spinlock));
8240 :
8241 : /* Notify each descriptor about hotremoval */
8242 164 : TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
8243 19 : rc = -EBUSY;
8244 : /*
8245 : * Defer invocation of the event_cb to a separate message that will
8246 : * run later on its thread. This ensures this context unwinds and
8247 : * we don't recursively unregister this bdev again if the event_cb
8248 : * immediately closes its descriptor.
8249 : */
8250 19 : event_notify(desc, _remove_notify);
8251 19 : }
8252 :
8253 : /* If there are no descriptors, proceed removing the bdev */
8254 145 : if (rc == 0) {
8255 130 : bdev_examine_allowlist_remove(bdev->name);
8256 258 : TAILQ_FOREACH(alias, &bdev->aliases, tailq) {
8257 128 : bdev_examine_allowlist_remove(alias->alias.name);
8258 128 : }
8259 130 : TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
8260 130 : SPDK_DEBUGLOG(bdev, "Removing bdev %s from list done\n", bdev->name);
8261 :
8262 : /* Delete the name and the UUID alias */
8263 130 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
8264 130 : bdev_name_del_unsafe(&bdev->internal.bdev_name);
8265 130 : bdev_alias_del(bdev, uuid, bdev_name_del_unsafe);
8266 :
8267 130 : spdk_notify_send("bdev_unregister", spdk_bdev_get_name(bdev));
8268 :
8269 130 : if (bdev->internal.reset_in_progress != NULL) {
8270 : /* If reset is in progress, let the completion callback for reset
8271 : * unregister the bdev.
8272 : */
8273 1 : rc = -EBUSY;
8274 1 : }
8275 130 : }
8276 :
8277 145 : return rc;
8278 : }
8279 :
8280 : static void
8281 4 : bdev_unregister_abort_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
8282 : struct spdk_io_channel *io_ch, void *_ctx)
8283 : {
8284 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
8285 :
8286 4 : bdev_channel_abort_queued_ios(bdev_ch);
8287 4 : spdk_bdev_for_each_channel_continue(i, 0);
8288 4 : }
8289 :
8290 : static void
8291 130 : bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
8292 : {
8293 : int rc;
8294 :
8295 130 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8296 130 : spdk_spin_lock(&bdev->internal.spinlock);
8297 : /*
8298 : * Set the status to REMOVING only after aborting all channels has completed.
8299 : * Otherwise, the last spdk_bdev_close() may call spdk_io_device_unregister()
8300 : * while spdk_bdev_for_each_channel() is still executing, and
8301 : * spdk_io_device_unregister() may fail.
8302 : */
8303 130 : bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
8304 130 : rc = bdev_unregister_unsafe(bdev);
8305 130 : spdk_spin_unlock(&bdev->internal.spinlock);
8306 130 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8307 :
8308 130 : if (rc == 0) {
8309 114 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
8310 114 : }
8311 130 : }
8312 :
8313 : void
8314 137 : spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
8315 : {
8316 : struct spdk_thread *thread;
8317 :
8318 137 : SPDK_DEBUGLOG(bdev, "Removing bdev %s from list\n", bdev->name);
8319 :
8320 137 : thread = spdk_get_thread();
8321 137 : if (!thread) {
8322 : /* The user called this from a non-SPDK thread. */
8323 0 : if (cb_fn != NULL) {
8324 0 : cb_fn(cb_arg, -ENOTSUP);
8325 0 : }
8326 0 : return;
8327 : }
8328 :
8329 137 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8330 137 : if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
8331 137 : bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
8332 7 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8333 7 : if (cb_fn) {
8334 0 : cb_fn(cb_arg, -EBUSY);
8335 0 : }
8336 7 : return;
8337 : }
8338 :
8339 130 : spdk_spin_lock(&bdev->internal.spinlock);
8340 130 : bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
8341 130 : bdev->internal.unregister_cb = cb_fn;
8342 130 : bdev->internal.unregister_ctx = cb_arg;
8343 130 : bdev->internal.unregister_td = thread;
8344 130 : spdk_spin_unlock(&bdev->internal.spinlock);
8345 130 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8346 :
8347 130 : spdk_bdev_set_qd_sampling_period(bdev, 0);
8348 :
8349 130 : spdk_bdev_for_each_channel(bdev, bdev_unregister_abort_channel, bdev,
8350 : bdev_unregister);
8351 137 : }
8352 :
8353 : int
8354 4 : spdk_bdev_unregister_by_name(const char *bdev_name, struct spdk_bdev_module *module,
8355 : spdk_bdev_unregister_cb cb_fn, void *cb_arg)
8356 : {
8357 : struct spdk_bdev_desc *desc;
8358 : struct spdk_bdev *bdev;
8359 : int rc;
8360 :
8361 4 : rc = spdk_bdev_open_ext(bdev_name, false, _tmp_bdev_event_cb, NULL, &desc);
8362 4 : if (rc != 0) {
8363 1 : SPDK_ERRLOG("Failed to open bdev with name: %s\n", bdev_name);
8364 1 : return rc;
8365 : }
8366 :
8367 3 : bdev = spdk_bdev_desc_get_bdev(desc);
8368 :
8369 3 : if (bdev->module != module) {
8370 1 : spdk_bdev_close(desc);
8371 1 : SPDK_ERRLOG("Bdev %s was not registered by the specified module.\n",
8372 : bdev_name);
8373 1 : return -ENODEV;
8374 : }
8375 :
8376 2 : spdk_bdev_unregister(bdev, cb_fn, cb_arg);
8377 :
8378 2 : spdk_bdev_close(desc);
8379 :
8380 2 : return 0;
8381 4 : }
8382 :
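 : /*
 : * spdk_bdev_unregister_by_name() is typically called from an RPC handler,
 : * with the module pointer ensuring a module can only delete bdevs it
 : * registered itself. Illustrative sketch (my_if, delete_done and req are
 : * hypothetical names):
 : *
 : *   rc = spdk_bdev_unregister_by_name("mydev0", &my_if, delete_done, req);
 : *   if (rc != 0) {
 : *           delete_done(req, rc);
 : *   }
 : */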
8383 : static int
8384 269 : bdev_start_qos(struct spdk_bdev *bdev)
8385 : {
8386 : struct set_qos_limit_ctx *ctx;
8387 :
8388 : /* Enable QoS */
8389 269 : if (bdev->internal.qos && bdev->internal.qos->thread == NULL) {
8390 2 : ctx = calloc(1, sizeof(*ctx));
8391 2 : if (ctx == NULL) {
8392 0 : SPDK_ERRLOG("Failed to allocate memory for QoS context\n");
8393 0 : return -ENOMEM;
8394 : }
8395 2 : ctx->bdev = bdev;
8396 2 : spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx, bdev_enable_qos_done);
8397 2 : }
8398 :
8399 269 : return 0;
8400 269 : }
8401 :
8402 : static void
8403 25 : log_already_claimed(enum spdk_log_level level, const int line, const char *func, const char *detail,
8404 : struct spdk_bdev *bdev)
8405 : {
8406 : enum spdk_bdev_claim_type type;
8407 : const char *typename, *modname;
8408 : extern struct spdk_log_flag SPDK_LOG_bdev;
8409 :
8410 25 : assert(spdk_spin_held(&bdev->internal.spinlock));
8411 :
8412 25 : if (level >= SPDK_LOG_INFO && !SPDK_LOG_bdev.enabled) {
8413 0 : return;
8414 : }
8415 :
8416 25 : type = bdev->internal.claim_type;
8417 25 : typename = spdk_bdev_claim_get_name(type);
8418 :
8419 25 : if (type == SPDK_BDEV_CLAIM_EXCL_WRITE) {
8420 6 : modname = bdev->internal.claim.v1.module->name;
8421 12 : spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8422 6 : bdev->name, detail, typename, modname);
8423 6 : return;
8424 : }
8425 :
8426 19 : if (claim_type_is_v2(type)) {
8427 : struct spdk_bdev_module_claim *claim;
8428 :
8429 38 : TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
8430 19 : modname = claim->module->name;
8431 38 : spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8432 19 : bdev->name, detail, typename, modname);
8433 19 : }
8434 19 : return;
8435 : }
8436 :
8437 0 : assert(false);
8438 25 : }
8439 :
8440 : static int
8441 278 : bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
8442 : {
8443 : struct spdk_thread *thread;
8444 278 : int rc = 0;
8445 :
8446 278 : thread = spdk_get_thread();
8447 278 : if (!thread) {
8448 0 : SPDK_ERRLOG("Cannot open bdev from non-SPDK thread.\n");
8449 0 : return -ENOTSUP;
8450 : }
8451 :
8452 278 : SPDK_DEBUGLOG(bdev, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8453 : spdk_get_thread());
8454 :
8455 278 : desc->bdev = bdev;
8456 278 : desc->thread = thread;
8457 278 : desc->write = write;
8458 :
8459 278 : spdk_spin_lock(&bdev->internal.spinlock);
8460 278 : if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
8461 278 : bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
8462 3 : spdk_spin_unlock(&bdev->internal.spinlock);
8463 3 : return -ENODEV;
8464 : }
8465 :
8466 275 : if (write && bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8467 6 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8468 6 : spdk_spin_unlock(&bdev->internal.spinlock);
8469 6 : return -EPERM;
8470 : }
8471 :
8472 269 : rc = bdev_start_qos(bdev);
8473 269 : if (rc != 0) {
8474 0 : SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
8475 0 : spdk_spin_unlock(&bdev->internal.spinlock);
8476 0 : return rc;
8477 : }
8478 :
8479 269 : TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
8480 :
8481 269 : spdk_spin_unlock(&bdev->internal.spinlock);
8482 :
8483 269 : return 0;
8484 278 : }
8485 :
8486 : static void
8487 279 : bdev_open_opts_get_defaults(struct spdk_bdev_open_opts *opts, size_t opts_size)
8488 : {
8489 279 : if (!opts) {
8490 0 : SPDK_ERRLOG("opts should not be NULL.\n");
8491 0 : return;
8492 : }
8493 :
8494 279 : if (!opts_size) {
8495 0 : SPDK_ERRLOG("opts_size should not be zero.\n");
8496 0 : return;
8497 : }
8498 :
8499 279 : memset(opts, 0, opts_size);
8500 279 : opts->size = opts_size;
8501 :
8502 : #define FIELD_OK(field) \
8503 : offsetof(struct spdk_bdev_open_opts, field) + sizeof(opts->field) <= opts_size
8504 :
8505 : #define SET_FIELD(field, value) \
8506 : if (FIELD_OK(field)) { \
8507 : opts->field = value; \
8508 : } \
8509 :
8510 279 : SET_FIELD(hide_metadata, false);
8511 :
8512 : #undef FIELD_OK
8513 : #undef SET_FIELD
8514 279 : }
8515 :
8516 : static void
8517 2 : bdev_open_opts_copy(struct spdk_bdev_open_opts *opts,
8518 : const struct spdk_bdev_open_opts *opts_src, size_t opts_size)
8519 : {
8520 2 : assert(opts);
8521 2 : assert(opts_src);
8522 :
8523 : #define SET_FIELD(field) \
8524 : if (offsetof(struct spdk_bdev_open_opts, field) + sizeof(opts->field) <= opts_size) { \
8525 : opts->field = opts_src->field; \
8526 : } \
8527 :
8528 2 : SET_FIELD(hide_metadata);
8529 :
8530 2 : opts->size = opts_src->size;
8531 :
8532 : /* We should not remove this statement, but need to update the assert statement
8533 : * if we add a new field, and also add a corresponding SET_FIELD statement.
8534 : */
8535 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_open_opts) == 16, "Incorrect size");
8536 :
8537 : #undef SET_FIELD
8538 2 : }
8539 :
8540 : void
8541 1 : spdk_bdev_open_opts_init(struct spdk_bdev_open_opts *opts, size_t opts_size)
8542 : {
8543 : struct spdk_bdev_open_opts opts_local;
8544 :
8545 1 : bdev_open_opts_get_defaults(&opts_local, sizeof(opts_local));
8546 1 : bdev_open_opts_copy(opts, &opts_local, opts_size);
8547 1 : }
8548 :
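 : /*
 : * Callers are expected to initialize open options on the stack before
 : * passing them to spdk_bdev_open_ext_v2(). Illustrative sketch:
 : *
 : *   struct spdk_bdev_open_opts opts;
 : *
 : *   spdk_bdev_open_opts_init(&opts, sizeof(opts));
 : *   opts.hide_metadata = true;
 : */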
8549 : static int
8550 278 : bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *event_ctx,
8551 : struct spdk_bdev_open_opts *user_opts, struct spdk_bdev_desc **_desc)
8552 : {
8553 : struct spdk_bdev_desc *desc;
8554 : struct spdk_bdev_open_opts opts;
8555 : unsigned int i;
8556 :
8557 278 : bdev_open_opts_get_defaults(&opts, sizeof(opts));
8558 278 : if (user_opts != NULL) {
8559 1 : bdev_open_opts_copy(&opts, user_opts, user_opts->size);
8560 1 : }
8561 :
8562 278 : desc = calloc(1, sizeof(*desc));
8563 278 : if (desc == NULL) {
8564 0 : SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
8565 0 : return -ENOMEM;
8566 : }
8567 :
8568 278 : desc->opts = opts;
8569 :
8570 278 : TAILQ_INIT(&desc->pending_media_events);
8571 278 : TAILQ_INIT(&desc->free_media_events);
8572 :
8573 278 : desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
8574 278 : desc->callback.event_fn = event_cb;
8575 278 : desc->callback.ctx = event_ctx;
8576 278 : spdk_spin_init(&desc->spinlock);
8577 :
8578 278 : if (desc->opts.hide_metadata) {
8579 1 : if (spdk_bdev_is_md_separate(bdev)) {
8580 0 : SPDK_ERRLOG("hide_metadata option is not supported with separate metadata.\n");
8581 0 : bdev_desc_free(desc);
8582 0 : return -EINVAL;
8583 : }
8584 1 : }
8585 :
8586 278 : if (bdev->media_events) {
8587 0 : desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
8588 : sizeof(*desc->media_events_buffer));
8589 0 : if (desc->media_events_buffer == NULL) {
8590 0 : SPDK_ERRLOG("Failed to initialize media event pool\n");
8591 0 : bdev_desc_free(desc);
8592 0 : return -ENOMEM;
8593 : }
8594 :
8595 0 : for (i = 0; i < MEDIA_EVENT_POOL_SIZE; ++i) {
8596 0 : TAILQ_INSERT_TAIL(&desc->free_media_events,
8597 : &desc->media_events_buffer[i], tailq);
8598 0 : }
8599 0 : }
8600 :
8601 278 : if (bdev->fn_table->accel_sequence_supported != NULL) {
8602 0 : for (i = 0; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
8603 0 : desc->accel_sequence_supported[i] =
8604 0 : bdev->fn_table->accel_sequence_supported(bdev->ctxt,
8605 0 : (enum spdk_bdev_io_type)i);
8606 0 : }
8607 0 : }
8608 :
8609 278 : *_desc = desc;
8610 :
8611 278 : return 0;
8612 278 : }
8613 :
8614 : static int
8615 136 : bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8616 : void *event_ctx, struct spdk_bdev_open_opts *opts,
8617 : struct spdk_bdev_desc **_desc)
8618 : {
8619 : struct spdk_bdev_desc *desc;
8620 : struct spdk_bdev *bdev;
8621 : int rc;
8622 :
8623 136 : bdev = bdev_get_by_name(bdev_name);
8624 :
8625 136 : if (bdev == NULL) {
8626 1 : SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
8627 1 : return -ENODEV;
8628 : }
8629 :
8630 135 : rc = bdev_desc_alloc(bdev, event_cb, event_ctx, opts, &desc);
8631 135 : if (rc != 0) {
8632 0 : return rc;
8633 : }
8634 :
8635 135 : rc = bdev_open(bdev, write, desc);
8636 135 : if (rc != 0) {
8637 7 : bdev_desc_free(desc);
8638 7 : desc = NULL;
8639 7 : }
8640 :
8641 135 : *_desc = desc;
8642 :
8643 135 : return rc;
8644 136 : }
8645 :
8646 : int
8647 138 : spdk_bdev_open_ext_v2(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8648 : void *event_ctx, struct spdk_bdev_open_opts *opts,
8649 : struct spdk_bdev_desc **_desc)
8650 : {
8651 : int rc;
8652 :
8653 138 : if (event_cb == NULL) {
8654 2 : SPDK_ERRLOG("Missing event callback function\n");
8655 2 : return -EINVAL;
8656 : }
8657 :
8658 136 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8659 136 : rc = bdev_open_ext(bdev_name, write, event_cb, event_ctx, opts, _desc);
8660 136 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8661 :
8662 136 : return rc;
8663 138 : }
8664 :
8665 : int
8666 136 : spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8667 : void *event_ctx, struct spdk_bdev_desc **_desc)
8668 : {
8669 136 : return spdk_bdev_open_ext_v2(bdev_name, write, event_cb, event_ctx, NULL, _desc);
8670 : }
8671 :
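 : /*
 : * Typical open/close usage, assuming a hypothetical event callback that
 : * reacts to hot removal (my_event_cb and g_ctx are illustrative names):
 : *
 : *   static void
 : *   my_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
 : *               void *event_ctx)
 : *   {
 : *           if (type == SPDK_BDEV_EVENT_REMOVE) {
 : *                   ... close the descriptor stored in event_ctx ...
 : *           }
 : *   }
 : *
 : *   struct spdk_bdev_desc *desc;
 : *   int rc;
 : *
 : *   rc = spdk_bdev_open_ext("Malloc0", true, my_event_cb, &g_ctx, &desc);
 : *   if (rc == 0) {
 : *           ... submit I/O, then spdk_bdev_close(desc) on the same thread ...
 : *   }
 : */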
8672 : struct spdk_bdev_open_async_ctx {
8673 : char *bdev_name;
8674 : spdk_bdev_event_cb_t event_cb;
8675 : void *event_ctx;
8676 : bool write;
8677 : int rc;
8678 : spdk_bdev_open_async_cb_t cb_fn;
8679 : void *cb_arg;
8680 : struct spdk_bdev_desc *desc;
8681 : struct spdk_bdev_open_async_opts opts;
8682 : uint64_t start_ticks;
8683 : struct spdk_thread *orig_thread;
8684 : struct spdk_poller *poller;
8685 : TAILQ_ENTRY(spdk_bdev_open_async_ctx) tailq;
8686 : };
8687 :
8688 : static void
8689 0 : bdev_open_async_done(void *arg)
8690 : {
8691 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8692 :
8693 0 : ctx->cb_fn(ctx->desc, ctx->rc, ctx->cb_arg);
8694 :
8695 0 : free(ctx->bdev_name);
8696 0 : free(ctx);
8697 0 : }
8698 :
8699 : static void
8700 0 : bdev_open_async_cancel(void *arg)
8701 : {
8702 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8703 :
8704 0 : assert(ctx->rc == -ESHUTDOWN);
8705 :
8706 0 : spdk_poller_unregister(&ctx->poller);
8707 :
8708 0 : bdev_open_async_done(ctx);
8709 0 : }
8710 :
8711 : /* This is called when the bdev library is shutting down. */
8712 : static void
8713 68 : bdev_open_async_fini(void)
8714 : {
8715 : struct spdk_bdev_open_async_ctx *ctx, *tmp_ctx;
8716 :
8717 68 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8718 68 : TAILQ_FOREACH_SAFE(ctx, &g_bdev_mgr.async_bdev_opens, tailq, tmp_ctx) {
8719 0 : TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8720 : /*
8721 : * We have to move to ctx->orig_thread to unregister ctx->poller.
8722 : * However, there is a chance that ctx->poller executes before the
8723 : * message does, which could result in bdev_open_async_done() being
8724 : * called twice. To avoid such a race condition, set ctx->rc to
8725 : * -ESHUTDOWN.
8726 : */
8727 0 : ctx->rc = -ESHUTDOWN;
8728 0 : spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_cancel, ctx);
8729 0 : }
8730 68 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8731 68 : }
8732 :
8733 : static int bdev_open_async(void *arg);
8734 :
8735 : static void
8736 0 : _bdev_open_async(struct spdk_bdev_open_async_ctx *ctx)
8737 : {
8738 : uint64_t timeout_ticks;
8739 :
8740 0 : if (ctx->rc == -ESHUTDOWN) {
8741 : /* This context is being canceled. Do nothing. */
8742 0 : return;
8743 : }
8744 :
8745 0 : ctx->rc = bdev_open_ext(ctx->bdev_name, ctx->write, ctx->event_cb, ctx->event_ctx,
8746 0 : NULL, &ctx->desc);
8747 0 : if (ctx->rc == 0 || ctx->opts.timeout_ms == 0) {
8748 0 : goto exit;
8749 : }
8750 :
8751 0 : timeout_ticks = ctx->start_ticks + ctx->opts.timeout_ms * spdk_get_ticks_hz() / 1000ull;
8752 0 : if (spdk_get_ticks() >= timeout_ticks) {
8753 0 : SPDK_ERRLOG("Timed out while waiting for bdev '%s' to appear\n", ctx->bdev_name);
8754 0 : ctx->rc = -ETIMEDOUT;
8755 0 : goto exit;
8756 : }
8757 :
8758 0 : return;
8759 :
8760 : exit:
8761 0 : spdk_poller_unregister(&ctx->poller);
8762 0 : TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8763 :
8764 : /* Completion callback is processed after stack unwinding. */
8765 0 : spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_done, ctx);
8766 0 : }
8767 :
8768 : static int
8769 0 : bdev_open_async(void *arg)
8770 : {
8771 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8772 :
8773 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8774 :
8775 0 : _bdev_open_async(ctx);
8776 :
8777 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8778 :
8779 0 : return SPDK_POLLER_BUSY;
8780 : }
8781 :
8782 : static void
8783 0 : bdev_open_async_opts_copy(struct spdk_bdev_open_async_opts *opts,
8784 : struct spdk_bdev_open_async_opts *opts_src,
8785 : size_t size)
8786 : {
8787 0 : assert(opts);
8788 0 : assert(opts_src);
8789 :
8790 0 : opts->size = size;
8791 :
8792 : #define SET_FIELD(field) \
8793 : if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8794 : opts->field = opts_src->field; \
8795 : } \
8796 :
8797 0 : SET_FIELD(timeout_ms);
8798 :
8799 : /* Do not remove this statement. Always update the assert below when adding a
8800 : * new field, and do not forget to add a corresponding SET_FIELD statement. */
8801 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_open_async_opts) == 16, "Incorrect size");
8802 :
8803 : #undef SET_FIELD
8804 0 : }
8805 :
8806 : static void
8807 0 : bdev_open_async_opts_get_default(struct spdk_bdev_open_async_opts *opts, size_t size)
8808 : {
8809 0 : assert(opts);
8810 :
8811 0 : opts->size = size;
8812 :
8813 : #define SET_FIELD(field, value) \
8814 : if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8815 : opts->field = value; \
8816 : } \
8817 :
8818 0 : SET_FIELD(timeout_ms, 0);
8819 :
8820 : #undef SET_FIELD
8821 0 : }
8822 :
8823 : int
8824 0 : spdk_bdev_open_async(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8825 : void *event_ctx, struct spdk_bdev_open_async_opts *opts,
8826 : spdk_bdev_open_async_cb_t open_cb, void *open_cb_arg)
8827 : {
8828 : struct spdk_bdev_open_async_ctx *ctx;
8829 :
8830 0 : if (event_cb == NULL) {
8831 0 : SPDK_ERRLOG("Missing event callback function\n");
8832 0 : return -EINVAL;
8833 : }
8834 :
8835 0 : if (open_cb == NULL) {
8836 0 : SPDK_ERRLOG("Missing open callback function\n");
8837 0 : return -EINVAL;
8838 : }
8839 :
8840 0 : if (opts != NULL && opts->size == 0) {
8841 0 : SPDK_ERRLOG("size in the options structure should not be zero\n");
8842 0 : return -EINVAL;
8843 : }
8844 :
8845 0 : ctx = calloc(1, sizeof(*ctx));
8846 0 : if (ctx == NULL) {
8847 0 : SPDK_ERRLOG("Failed to allocate open context\n");
8848 0 : return -ENOMEM;
8849 : }
8850 :
8851 0 : ctx->bdev_name = strdup(bdev_name);
8852 0 : if (ctx->bdev_name == NULL) {
8853 0 : SPDK_ERRLOG("Failed to duplicate bdev_name\n");
8854 0 : free(ctx);
8855 0 : return -ENOMEM;
8856 : }
8857 :
8858 0 : ctx->poller = SPDK_POLLER_REGISTER(bdev_open_async, ctx, 100 * 1000);
8859 0 : if (ctx->poller == NULL) {
8860 0 : SPDK_ERRLOG("Failed to register bdev_open_async poller\n");
8861 0 : free(ctx->bdev_name);
8862 0 : free(ctx);
8863 0 : return -ENOMEM;
8864 : }
8865 :
8866 0 : ctx->cb_fn = open_cb;
8867 0 : ctx->cb_arg = open_cb_arg;
8868 0 : ctx->write = write;
8869 0 : ctx->event_cb = event_cb;
8870 0 : ctx->event_ctx = event_ctx;
8871 0 : ctx->orig_thread = spdk_get_thread();
8872 0 : ctx->start_ticks = spdk_get_ticks();
8873 :
8874 0 : bdev_open_async_opts_get_default(&ctx->opts, sizeof(ctx->opts));
8875 0 : if (opts != NULL) {
8876 0 : bdev_open_async_opts_copy(&ctx->opts, opts, opts->size);
8877 0 : }
8878 :
8879 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8880 :
8881 0 : TAILQ_INSERT_TAIL(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8882 0 : _bdev_open_async(ctx);
8883 :
8884 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8885 :
8886 0 : return 0;
8887 0 : }
8888 :
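 : /*
 : * Asynchronous open is useful when a bdev is expected to appear shortly,
 : * e.g. during attach. The open callback receives (desc, rc, cb_arg).
 : * Illustrative sketch (my_event_cb and open_done are hypothetical):
 : *
 : *   struct spdk_bdev_open_async_opts opts = {
 : *           .size = sizeof(opts),
 : *           .timeout_ms = 5000,
 : *   };
 : *
 : *   rc = spdk_bdev_open_async("Nvme0n1", false, my_event_cb, NULL, &opts,
 : *                             open_done, NULL);
 : */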
8889 : static void
8890 269 : bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
8891 : {
8892 : int rc;
8893 :
8894 269 : spdk_spin_lock(&bdev->internal.spinlock);
8895 269 : spdk_spin_lock(&desc->spinlock);
8896 :
8897 269 : TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
8898 :
8899 269 : desc->closed = true;
8900 :
8901 269 : if (desc->claim != NULL) {
8902 20 : bdev_desc_release_claims(desc);
8903 20 : }
8904 :
8905 269 : if (0 == desc->refs) {
8906 258 : spdk_spin_unlock(&desc->spinlock);
8907 258 : bdev_desc_free(desc);
8908 258 : } else {
8909 11 : spdk_spin_unlock(&desc->spinlock);
8910 : }
8911 :
8912 : /* If no more descriptors, kill QoS channel */
8913 269 : if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8914 7 : SPDK_DEBUGLOG(bdev, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
8915 : bdev->name, spdk_get_thread());
8916 :
8917 7 : if (bdev_qos_destroy(bdev)) {
8918 : /* There isn't anything we can do to recover here. Just let the
8919 : * old QoS poller keep running. The QoS handling won't change
8920 : * cores when the user allocates a new channel, but it won't break. */
8921 0 : SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
8922 0 : }
8923 7 : }
8924 :
8925 269 : if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8926 15 : rc = bdev_unregister_unsafe(bdev);
8927 15 : spdk_spin_unlock(&bdev->internal.spinlock);
8928 :
8929 15 : if (rc == 0) {
8930 15 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
8931 15 : }
8932 15 : } else {
8933 254 : spdk_spin_unlock(&bdev->internal.spinlock);
8934 : }
8935 269 : }
8936 :
8937 : void
8938 128 : spdk_bdev_close(struct spdk_bdev_desc *desc)
8939 : {
8940 128 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8941 :
8942 128 : SPDK_DEBUGLOG(bdev, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8943 : spdk_get_thread());
8944 :
8945 128 : assert(desc->thread == spdk_get_thread());
8946 :
8947 128 : spdk_poller_unregister(&desc->io_timeout_poller);
8948 :
8949 128 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8950 :
8951 128 : bdev_close(bdev, desc);
8952 :
8953 128 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8954 128 : }
8955 :
8956 : int32_t
8957 3 : spdk_bdev_get_numa_id(struct spdk_bdev *bdev)
8958 : {
8959 3 : if (bdev->numa.id_valid) {
8960 2 : return bdev->numa.id;
8961 : } else {
8962 1 : return SPDK_ENV_NUMA_ID_ANY;
8963 : }
8964 3 : }
8965 :
8966 : static void
8967 130 : bdev_register_finished(void *arg)
8968 : {
8969 130 : struct spdk_bdev_desc *desc = arg;
8970 130 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8971 :
8972 130 : spdk_notify_send("bdev_register", spdk_bdev_get_name(bdev));
8973 :
8974 130 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8975 :
8976 130 : bdev_close(bdev, desc);
8977 :
8978 130 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8979 130 : }
8980 :
8981 : int
8982 133 : spdk_bdev_register(struct spdk_bdev *bdev)
8983 : {
8984 : struct spdk_bdev_desc *desc;
8985 133 : struct spdk_thread *thread = spdk_get_thread();
8986 : int rc;
8987 :
8988 133 : if (spdk_unlikely(!spdk_thread_is_app_thread(NULL))) {
8989 1 : SPDK_ERRLOG("Cannot register bdev %s on thread %p (%s)\n", bdev->name, thread,
8990 : thread ? spdk_thread_get_name(thread) : "null");
8991 1 : return -EINVAL;
8992 : }
8993 :
8994 132 : rc = bdev_register(bdev);
8995 132 : if (rc != 0) {
8996 2 : return rc;
8997 : }
8998 :
8999 : /* A descriptor is opened to prevent bdev deletion during examination */
9000 130 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, NULL, &desc);
9001 130 : if (rc != 0) {
9002 0 : spdk_bdev_unregister(bdev, NULL, NULL);
9003 0 : return rc;
9004 : }
9005 :
9006 130 : rc = bdev_open(bdev, false, desc);
9007 130 : if (rc != 0) {
9008 0 : bdev_desc_free(desc);
9009 0 : spdk_bdev_unregister(bdev, NULL, NULL);
9010 0 : return rc;
9011 : }
9012 :
9013 : /* Examine configuration before initializing I/O */
9014 130 : bdev_examine(bdev);
9015 :
9016 130 : rc = spdk_bdev_wait_for_examine(bdev_register_finished, desc);
9017 130 : if (rc != 0) {
9018 0 : bdev_close(bdev, desc);
9019 0 : spdk_bdev_unregister(bdev, NULL, NULL);
9020 0 : }
9021 :
9022 130 : return rc;
9023 133 : }
9024 :
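 : /*
 : * A bdev module fills in at least name, blocklen, blockcnt, fn_table and
 : * module before registering. Minimal sketch (the values and the function
 : * table are module-specific assumptions):
 : *
 : *   bdev->name = strdup("mydev0");
 : *   bdev->blocklen = 512;
 : *   bdev->blockcnt = 1024;
 : *   bdev->fn_table = &my_fn_table;
 : *   bdev->module = &my_if;
 : *
 : *   rc = spdk_bdev_register(bdev);  (must run on the app thread)
 : */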
9025 : int
9026 26 : spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
9027 : struct spdk_bdev_module *module)
9028 : {
9029 26 : spdk_spin_lock(&bdev->internal.spinlock);
9030 :
9031 26 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
9032 6 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
9033 6 : spdk_spin_unlock(&bdev->internal.spinlock);
9034 6 : return -EPERM;
9035 : }
9036 :
9037 20 : if (desc && !desc->write) {
9038 5 : desc->write = true;
9039 5 : }
9040 :
9041 20 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
9042 20 : bdev->internal.claim.v1.module = module;
9043 :
9044 20 : spdk_spin_unlock(&bdev->internal.spinlock);
9045 20 : return 0;
9046 26 : }
9047 :
9048 : void
9049 8 : spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
9050 : {
9051 8 : spdk_spin_lock(&bdev->internal.spinlock);
9052 :
9053 8 : assert(bdev->internal.claim.v1.module != NULL);
9054 8 : assert(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
9055 8 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
9056 8 : bdev->internal.claim.v1.module = NULL;
9057 :
9058 8 : spdk_spin_unlock(&bdev->internal.spinlock);
9059 8 : }
9060 :
9061 : /*
9062 : * Start claims v2
9063 : */
9064 :
9065 : const char *
9066 25 : spdk_bdev_claim_get_name(enum spdk_bdev_claim_type type)
9067 : {
9068 25 : switch (type) {
9069 : case SPDK_BDEV_CLAIM_NONE:
9070 0 : return "not_claimed";
9071 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
9072 6 : return "exclusive_write";
9073 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
9074 8 : return "read_many_write_one";
9075 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
9076 5 : return "read_many_write_none";
9077 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9078 6 : return "read_many_write_many";
9079 : default:
9080 0 : break;
9081 : }
9082 0 : return "invalid_claim";
9083 25 : }
9084 :
9085 : static bool
9086 115 : claim_type_is_v2(enum spdk_bdev_claim_type type)
9087 : {
9088 115 : switch (type) {
9089 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
9090 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
9091 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9092 115 : return true;
9093 : default:
9094 0 : break;
9095 : }
9096 0 : return false;
9097 115 : }
9098 :
9099 : /* Returns true if taking a claim with desc->write == false should make the descriptor writable. */
9100 : static bool
9101 17 : claim_type_promotes_to_write(enum spdk_bdev_claim_type type)
9102 : {
9103 17 : switch (type) {
9104 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
9105 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9106 6 : return true;
9107 : default:
9108 11 : break;
9109 : }
9110 11 : return false;
9111 17 : }
9112 :
9113 : void
9114 57 : spdk_bdev_claim_opts_init(struct spdk_bdev_claim_opts *opts, size_t size)
9115 : {
9116 57 : if (opts == NULL) {
9117 0 : SPDK_ERRLOG("opts should not be NULL\n");
9118 0 : assert(opts != NULL);
9119 0 : return;
9120 : }
9121 57 : if (size == 0) {
9122 0 : SPDK_ERRLOG("size should not be zero\n");
9123 0 : assert(size != 0);
9124 0 : return;
9125 : }
9126 :
9127 57 : memset(opts, 0, size);
9128 57 : opts->opts_size = size;
9129 :
9130 : #define FIELD_OK(field) \
9131 : offsetof(struct spdk_bdev_claim_opts, field) + sizeof(opts->field) <= size
9132 :
9133 : #define SET_FIELD(field, value) \
9134 : if (FIELD_OK(field)) { \
9135 : opts->field = value; \
9136 : } \
9137 :
9138 57 : SET_FIELD(shared_claim_key, 0);
9139 :
9140 : #undef FIELD_OK
9141 : #undef SET_FIELD
9142 57 : }
9143 :
9144 : static int
9145 22 : claim_opts_copy(struct spdk_bdev_claim_opts *src, struct spdk_bdev_claim_opts *dst)
9146 : {
9147 22 : if (src->opts_size == 0) {
9148 0 : SPDK_ERRLOG("size should not be zero\n");
9149 0 : return -1;
9150 : }
9151 :
9152 22 : memset(dst, 0, sizeof(*dst));
9153 22 : dst->opts_size = src->opts_size;
9154 :
9155 : #define FIELD_OK(field) \
9156 : offsetof(struct spdk_bdev_claim_opts, field) + sizeof(src->field) <= src->opts_size
9157 :
9158 : #define SET_FIELD(field) \
9159 : if (FIELD_OK(field)) { \
9160 : dst->field = src->field; \
9161 : } \
9162 :
9163 22 : if (FIELD_OK(name)) {
9164 22 : snprintf(dst->name, sizeof(dst->name), "%s", src->name);
9165 22 : }
9166 :
9167 22 : SET_FIELD(shared_claim_key);
9168 :
9169 : /* Do not remove this statement; update the assert below when adding a new
9170 : * field, and add a corresponding SET_FIELD statement as well. */
9171 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_claim_opts) == 48, "Incorrect size");
9172 :
9173 : #undef FIELD_OK
9174 : #undef SET_FIELD
9175 22 : return 0;
9176 22 : }
9177 :
9178 : /* Returns 0 if a read-write-once claim can be taken. */
9179 : static int
9180 10 : claim_verify_rwo(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
9181 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
9182 : {
9183 10 : struct spdk_bdev *bdev = desc->bdev;
9184 : struct spdk_bdev_desc *open_desc;
9185 :
9186 10 : assert(spdk_spin_held(&bdev->internal.spinlock));
9187 10 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
9188 :
9189 10 : if (opts->shared_claim_key != 0) {
9190 1 : SPDK_ERRLOG("%s: key option not supported with read-write-once claims\n",
9191 : bdev->name);
9192 1 : return -EINVAL;
9193 : }
9194 9 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
9195 1 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
9196 1 : return -EPERM;
9197 : }
9198 8 : if (desc->claim != NULL) {
9199 0 : SPDK_NOTICELOG("%s: descriptor already claimed bdev with module %s\n",
9200 : bdev->name, desc->claim->module->name);
9201 0 : return -EPERM;
9202 : }
9203 16 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
9204 10 : if (desc != open_desc && open_desc->write) {
9205 2 : SPDK_NOTICELOG("%s: Cannot obtain read-write-once claim while "
9206 : "another descriptor is open for writing\n",
9207 : bdev->name);
9208 2 : return -EPERM;
9209 : }
9210 8 : }
9211 :
9212 6 : return 0;
9213 10 : }
9214 :
9215 : /* Returns 0 if a read-only-many claim can be taken. */
9216 : static int
9217 15 : claim_verify_rom(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
9218 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
9219 : {
9220 15 : struct spdk_bdev *bdev = desc->bdev;
9221 : struct spdk_bdev_desc *open_desc;
9222 :
9223 15 : assert(spdk_spin_held(&bdev->internal.spinlock));
9224 15 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
9225 15 : assert(desc->claim == NULL);
9226 :
9227 15 : if (desc->write) {
9228 3 : SPDK_ERRLOG("%s: Cannot obtain read-only-many claim with writable descriptor\n",
9229 : bdev->name);
9230 3 : return -EINVAL;
9231 : }
9232 12 : if (opts->shared_claim_key != 0) {
9233 1 : SPDK_ERRLOG("%s: key option not supported with read-only-may claims\n", bdev->name);
9234 1 : return -EINVAL;
9235 : }
9236 11 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
9237 19 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
9238 11 : if (open_desc->write) {
9239 0 : SPDK_NOTICELOG("%s: Cannot obtain read-only-many claim while "
9240 : "another descriptor is open for writing\n",
9241 : bdev->name);
9242 0 : return -EPERM;
9243 : }
9244 11 : }
9245 8 : }
9246 :
9247 11 : return 0;
9248 15 : }
9249 :
9250 : /* Returns 0 if a read-write-many claim can be taken. */
9251 : static int
9252 8 : claim_verify_rwm(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
9253 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
9254 : {
9255 8 : struct spdk_bdev *bdev = desc->bdev;
9256 : struct spdk_bdev_desc *open_desc;
9257 :
9258 8 : assert(spdk_spin_held(&bdev->internal.spinlock));
9259 8 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
9260 8 : assert(desc->claim == NULL);
9261 :
9262 8 : if (opts->shared_claim_key == 0) {
9263 2 : SPDK_ERRLOG("%s: shared_claim_key option required with read-write-may claims\n",
9264 : bdev->name);
9265 2 : return -EINVAL;
9266 : }
9267 6 : switch (bdev->internal.claim_type) {
9268 : case SPDK_BDEV_CLAIM_NONE:
9269 7 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
9270 5 : if (open_desc == desc) {
9271 3 : continue;
9272 : }
9273 2 : if (open_desc->write) {
9274 2 : SPDK_NOTICELOG("%s: Cannot obtain read-write-many claim while "
9275 : "another descriptor is open for writing without a "
9276 : "claim\n", bdev->name);
9277 2 : return -EPERM;
9278 : }
9279 0 : }
9280 2 : break;
9281 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9282 2 : if (opts->shared_claim_key != bdev->internal.claim.v2.key) {
9283 1 : LOG_ALREADY_CLAIMED_ERROR("already claimed with another key", bdev);
9284 1 : return -EPERM;
9285 : }
9286 1 : break;
9287 : default:
9288 0 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
9289 0 : return -EBUSY;
9290 : }
9291 :
9292 3 : return 0;
9293 8 : }
9294 :
9295 : /* Updates desc and its bdev with a v2 claim. */
9296 : static int
9297 20 : claim_bdev(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
9298 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
9299 : {
9300 20 : struct spdk_bdev *bdev = desc->bdev;
9301 : struct spdk_bdev_module_claim *claim;
9302 :
9303 20 : assert(spdk_spin_held(&bdev->internal.spinlock));
9304 20 : assert(claim_type_is_v2(type));
9305 20 : assert(desc->claim == NULL);
9306 :
9307 20 : claim = calloc(1, sizeof(*desc->claim));
9308 20 : if (claim == NULL) {
9309 0 : SPDK_ERRLOG("%s: out of memory while allocating claim\n", bdev->name);
9310 0 : return -ENOMEM;
9311 : }
9312 20 : claim->module = module;
9313 20 : claim->desc = desc;
9314 : SPDK_STATIC_ASSERT(sizeof(claim->name) == sizeof(opts->name), "sizes must match");
9315 20 : memcpy(claim->name, opts->name, sizeof(claim->name));
9316 20 : desc->claim = claim;
9317 :
9318 20 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
9319 16 : bdev->internal.claim_type = type;
9320 16 : TAILQ_INIT(&bdev->internal.claim.v2.claims);
9321 16 : bdev->internal.claim.v2.key = opts->shared_claim_key;
9322 16 : }
9323 20 : assert(type == bdev->internal.claim_type);
9324 :
9325 20 : TAILQ_INSERT_TAIL(&bdev->internal.claim.v2.claims, claim, link);
9326 :
9327 20 : if (!desc->write && claim_type_promotes_to_write(type)) {
9328 6 : desc->write = true;
9329 6 : }
9330 :
9331 20 : return 0;
9332 20 : }
9333 :
9334 : int
9335 44 : spdk_bdev_module_claim_bdev_desc(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
9336 : struct spdk_bdev_claim_opts *_opts,
9337 : struct spdk_bdev_module *module)
9338 : {
9339 : struct spdk_bdev *bdev;
9340 : struct spdk_bdev_claim_opts opts;
9341 44 : int rc = 0;
9342 :
9343 44 : if (desc == NULL) {
9344 0 : SPDK_ERRLOG("descriptor must not be NULL\n");
9345 0 : return -EINVAL;
9346 : }
9347 :
9348 44 : bdev = desc->bdev;
9349 :
9350 44 : if (_opts == NULL) {
9351 22 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
9352 44 : } else if (claim_opts_copy(_opts, &opts) != 0) {
9353 0 : return -EINVAL;
9354 : }
9355 :
9356 44 : spdk_spin_lock(&bdev->internal.spinlock);
9357 :
9358 44 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE &&
9359 17 : bdev->internal.claim_type != type) {
9360 11 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
9361 11 : spdk_spin_unlock(&bdev->internal.spinlock);
9362 11 : return -EPERM;
9363 : }
9364 :
9365 33 : if (claim_type_is_v2(type) && desc->claim != NULL) {
9366 0 : SPDK_ERRLOG("%s: descriptor already has %s claim with name '%s'\n",
9367 : bdev->name, spdk_bdev_claim_get_name(type), desc->claim->name);
9368 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9369 0 : return -EPERM;
9370 : }
9371 :
9372 33 : switch (type) {
9373 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
9374 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9375 0 : return spdk_bdev_module_claim_bdev(bdev, desc, module);
9376 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
9377 10 : rc = claim_verify_rwo(desc, type, &opts, module);
9378 10 : break;
9379 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
9380 15 : rc = claim_verify_rom(desc, type, &opts, module);
9381 15 : break;
9382 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9383 8 : rc = claim_verify_rwm(desc, type, &opts, module);
9384 8 : break;
9385 : default:
9386 0 : SPDK_ERRLOG("%s: claim type %d not supported\n", bdev->name, type);
9387 0 : rc = -ENOTSUP;
9388 0 : }
9389 :
9390 33 : if (rc == 0) {
9391 20 : rc = claim_bdev(desc, type, &opts, module);
9392 20 : }
9393 :
9394 33 : spdk_spin_unlock(&bdev->internal.spinlock);
9395 33 : return rc;
9396 44 : }
9397 :
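 : /*
 : * Taking a v2 claim on an already-open descriptor. On success, a
 : * read-many-write-one or read-many-write-shared claim promotes a
 : * read-only descriptor to writable. Sketch (my_if is a hypothetical
 : * module):
 : *
 : *   struct spdk_bdev_claim_opts opts;
 : *
 : *   spdk_bdev_claim_opts_init(&opts, sizeof(opts));
 : *   snprintf(opts.name, sizeof(opts.name), "%s", "my_claim");
 : *   rc = spdk_bdev_module_claim_bdev_desc(desc,
 : *                   SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, &my_if);
 : */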
9398 : static void
9399 16 : claim_reset(struct spdk_bdev *bdev)
9400 : {
9401 16 : assert(spdk_spin_held(&bdev->internal.spinlock));
9402 16 : assert(claim_type_is_v2(bdev->internal.claim_type));
9403 16 : assert(TAILQ_EMPTY(&bdev->internal.claim.v2.claims));
9404 :
9405 16 : memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
9406 16 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
9407 16 : }
9408 :
9409 : static void
9410 20 : bdev_desc_release_claims(struct spdk_bdev_desc *desc)
9411 : {
9412 20 : struct spdk_bdev *bdev = desc->bdev;
9413 :
9414 20 : assert(spdk_spin_held(&bdev->internal.spinlock));
9415 20 : assert(claim_type_is_v2(bdev->internal.claim_type));
9416 :
9417 20 : if (bdev->internal.examine_in_progress == 0) {
9418 20 : TAILQ_REMOVE(&bdev->internal.claim.v2.claims, desc->claim, link);
9419 20 : free(desc->claim);
9420 20 : if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
9421 16 : claim_reset(bdev);
9422 16 : }
9423 20 : } else {
9424 : /* This is a dead claim that will be cleaned up when bdev_examine() is done. */
9425 0 : desc->claim->module = NULL;
9426 0 : desc->claim->desc = NULL;
9427 : }
9428 20 : desc->claim = NULL;
9429 20 : }
9430 :
9431 : /*
9432 : * End claims v2
9433 : */
9434 :
9435 : struct spdk_bdev *
9436 1554 : spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
9437 : {
9438 1554 : assert(desc != NULL);
9439 1554 : return desc->bdev;
9440 : }
9441 :
9442 : int
9443 1 : spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
9444 : {
9445 : struct spdk_bdev *bdev, *tmp;
9446 : struct spdk_bdev_desc *desc;
9447 1 : int rc = 0;
9448 :
9449 1 : assert(fn != NULL);
9450 :
9451 1 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9452 1 : bdev = spdk_bdev_first();
9453 9 : while (bdev != NULL) {
9454 8 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, NULL, &desc);
9455 8 : if (rc != 0) {
9456 0 : break;
9457 : }
9458 8 : rc = bdev_open(bdev, false, desc);
9459 8 : if (rc != 0) {
9460 1 : bdev_desc_free(desc);
9461 1 : if (rc == -ENODEV) {
9462 : /* Ignore the error and move to the next bdev. */
9463 1 : rc = 0;
9464 1 : bdev = spdk_bdev_next(bdev);
9465 1 : continue;
9466 : }
9467 0 : break;
9468 : }
9469 7 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9470 :
9471 7 : rc = fn(ctx, bdev);
9472 :
9473 7 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9474 7 : tmp = spdk_bdev_next(bdev);
9475 7 : bdev_close(bdev, desc);
9476 7 : if (rc != 0) {
9477 0 : break;
9478 : }
9479 7 : bdev = tmp;
9480 : }
9481 1 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9482 :
9483 1 : return rc;
9484 : }
9485 :
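 : /*
 : * spdk_for_each_bdev() opens each bdev around the callback so it cannot
 : * be unregistered mid-iteration; a non-zero return from the callback
 : * stops the walk. Sketch (count_bdev is a hypothetical callback):
 : *
 : *   static int
 : *   count_bdev(void *ctx, struct spdk_bdev *bdev)
 : *   {
 : *           (*(int *)ctx)++;
 : *           return 0;
 : *   }
 : *
 : *   int count = 0;
 : *
 : *   spdk_for_each_bdev(&count, count_bdev);
 : */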
9486 : int
9487 1 : spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
9488 : {
9489 : struct spdk_bdev *bdev, *tmp;
9490 : struct spdk_bdev_desc *desc;
9491 1 : int rc = 0;
9492 :
9493 1 : assert(fn != NULL);
9494 :
9495 1 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9496 1 : bdev = spdk_bdev_first_leaf();
9497 6 : while (bdev != NULL) {
9498 5 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, NULL, &desc);
9499 5 : if (rc != 0) {
9500 0 : break;
9501 : }
9502 5 : rc = bdev_open(bdev, false, desc);
9503 5 : if (rc != 0) {
9504 1 : bdev_desc_free(desc);
9505 1 : if (rc == -ENODEV) {
9506 : /* Ignore the error and move to the next bdev. */
9507 1 : rc = 0;
9508 1 : bdev = spdk_bdev_next_leaf(bdev);
9509 1 : continue;
9510 : }
9511 0 : break;
9512 : }
9513 4 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9514 :
9515 4 : rc = fn(ctx, bdev);
9516 :
9517 4 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9518 4 : tmp = spdk_bdev_next_leaf(bdev);
9519 4 : bdev_close(bdev, desc);
9520 4 : if (rc != 0) {
9521 0 : break;
9522 : }
9523 4 : bdev = tmp;
9524 : }
9525 1 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9526 :
9527 1 : return rc;
9528 : }
9529 :
9530 : void
9531 0 : spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
9532 : {
9533 : struct iovec *iovs;
9534 : int iovcnt;
9535 :
9536 0 : if (bdev_io == NULL) {
9537 0 : return;
9538 : }
9539 :
9540 0 : switch (bdev_io->type) {
9541 : case SPDK_BDEV_IO_TYPE_READ:
9542 : case SPDK_BDEV_IO_TYPE_WRITE:
9543 : case SPDK_BDEV_IO_TYPE_ZCOPY:
9544 0 : iovs = bdev_io->u.bdev.iovs;
9545 0 : iovcnt = bdev_io->u.bdev.iovcnt;
9546 0 : break;
9547 : default:
9548 0 : iovs = NULL;
9549 0 : iovcnt = 0;
9550 0 : break;
9551 : }
9552 :
9553 0 : if (iovp) {
9554 0 : *iovp = iovs;
9555 0 : }
9556 0 : if (iovcntp) {
9557 0 : *iovcntp = iovcnt;
9558 0 : }
9559 0 : }
9560 :
9561 : void *
9562 0 : spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
9563 : {
9564 0 : if (bdev_io == NULL) {
9565 0 : return NULL;
9566 : }
9567 :
9568 0 : if (!spdk_bdev_is_md_separate(bdev_io->bdev)) {
9569 0 : return NULL;
9570 : }
9571 :
9572 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
9573 0 : bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
9574 0 : return bdev_io->u.bdev.md_buf;
9575 : }
9576 :
9577 0 : return NULL;
9578 0 : }
9579 :
9580 : void *
9581 0 : spdk_bdev_io_get_cb_arg(struct spdk_bdev_io *bdev_io)
9582 : {
9583 0 : if (bdev_io == NULL) {
9584 0 : assert(false);
9585 : return NULL;
9586 : }
9587 :
9588 0 : return bdev_io->internal.caller_ctx;
9589 : }
9590 :
9591 : void
9592 7 : spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
9593 : {
9594 :
9595 7 : if (spdk_bdev_module_list_find(bdev_module->name)) {
9596 0 : SPDK_ERRLOG("ERROR: module '%s' already registered.\n", bdev_module->name);
9597 0 : assert(false);
9598 : }
9599 :
9600 7 : spdk_spin_init(&bdev_module->internal.spinlock);
9601 7 : TAILQ_INIT(&bdev_module->internal.quiesced_ranges);
9602 :
9603 : /*
9604 : * Modules with examine callbacks must be initialized first, so they are
9605 : * ready to handle examine callbacks from later modules that will
9606 : * register physical bdevs.
9607 : */
9608 7 : if (bdev_module->examine_config != NULL || bdev_module->examine_disk != NULL) {
9609 4 : TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9610 4 : } else {
9611 3 : TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9612 : }
9613 7 : }
9614 :
9615 : struct spdk_bdev_module *
9616 7 : spdk_bdev_module_list_find(const char *name)
9617 : {
9618 : struct spdk_bdev_module *bdev_module;
9619 :
9620 14 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
9621 7 : if (strcmp(name, bdev_module->name) == 0) {
9622 0 : break;
9623 : }
9624 7 : }
9625 :
9626 7 : return bdev_module;
9627 : }
9628 :
9629 : static int
9630 6 : bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io)
9631 : {
9632 : uint64_t num_blocks;
9633 6 : void *md_buf = NULL;
9634 :
9635 6 : num_blocks = bdev_io->u.bdev.num_blocks;
9636 :
9637 6 : if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
9638 4 : md_buf = (char *)g_bdev_mgr.zero_buffer +
9639 2 : spdk_bdev_get_block_size(bdev_io->bdev) * num_blocks;
9640 2 : }
9641 :
9642 12 : return bdev_write_blocks_with_md(bdev_io->internal.desc,
9643 6 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
9644 6 : g_bdev_mgr.zero_buffer, md_buf,
9645 6 : bdev_io->u.bdev.offset_blocks, num_blocks,
9646 6 : bdev_write_zero_buffer_done, bdev_io);
9647 : }
9648 :
9649 : static void
9650 6 : bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
9651 : {
9652 6 : struct spdk_bdev_io *parent_io = cb_arg;
9653 :
9654 6 : spdk_bdev_free_io(bdev_io);
9655 :
9656 6 : parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
9657 6 : parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
9658 6 : }
9659 :
9660 : static void
9661 10 : bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
9662 : {
9663 10 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9664 10 : ctx->bdev->internal.qos_mod_in_progress = false;
9665 10 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9666 :
9667 10 : if (ctx->cb_fn) {
9668 8 : ctx->cb_fn(ctx->cb_arg, status);
9669 8 : }
9670 10 : free(ctx);
9671 10 : }
9672 :
9673 : static void
9674 2 : bdev_disable_qos_done(void *cb_arg)
9675 : {
9676 2 : struct set_qos_limit_ctx *ctx = cb_arg;
9677 2 : struct spdk_bdev *bdev = ctx->bdev;
9678 : struct spdk_bdev_qos *qos;
9679 :
9680 2 : spdk_spin_lock(&bdev->internal.spinlock);
9681 2 : qos = bdev->internal.qos;
9682 2 : bdev->internal.qos = NULL;
9683 2 : spdk_spin_unlock(&bdev->internal.spinlock);
9684 :
9685 2 : if (qos->thread != NULL) {
9686 2 : spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
9687 2 : spdk_poller_unregister(&qos->poller);
9688 2 : }
9689 :
9690 2 : free(qos);
9691 :
9692 2 : bdev_set_qos_limit_done(ctx, 0);
9693 2 : }
9694 :
9695 : static void
9696 2 : bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status)
9697 : {
9698 2 : struct set_qos_limit_ctx *ctx = _ctx;
9699 : struct spdk_thread *thread;
9700 :
9701 2 : spdk_spin_lock(&bdev->internal.spinlock);
9702 2 : thread = bdev->internal.qos->thread;
9703 2 : spdk_spin_unlock(&bdev->internal.spinlock);
9704 :
9705 2 : if (thread != NULL) {
9706 2 : spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
9707 2 : } else {
9708 0 : bdev_disable_qos_done(ctx);
9709 : }
9710 2 : }
9711 :
9712 : static void
9713 4 : bdev_disable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9714 : struct spdk_io_channel *ch, void *_ctx)
9715 : {
9716 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9717 : struct spdk_bdev_io *bdev_io;
9718 :
9719 4 : bdev_ch->flags &= ~BDEV_CH_QOS_ENABLED;
9720 :
9721 6 : while (!TAILQ_EMPTY(&bdev_ch->qos_queued_io)) {
9722 : /* Re-submit the queued I/O. */
9723 2 : bdev_io = TAILQ_FIRST(&bdev_ch->qos_queued_io);
9724 2 : TAILQ_REMOVE(&bdev_ch->qos_queued_io, bdev_io, internal.link);
9725 2 : _bdev_io_submit(bdev_io);
9726 : }
9727 :
9728 4 : spdk_bdev_for_each_channel_continue(i, 0);
9729 4 : }
9730 :
9731 : static void
9732 1 : bdev_update_qos_rate_limit_msg(void *cb_arg)
9733 : {
9734 1 : struct set_qos_limit_ctx *ctx = cb_arg;
9735 1 : struct spdk_bdev *bdev = ctx->bdev;
9736 :
9737 1 : spdk_spin_lock(&bdev->internal.spinlock);
9738 1 : bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
9739 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9740 :
9741 1 : bdev_set_qos_limit_done(ctx, 0);
9742 1 : }
9743 :
9744 : static void
9745 9 : bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9746 : struct spdk_io_channel *ch, void *_ctx)
9747 : {
9748 9 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9749 :
9750 9 : spdk_spin_lock(&bdev->internal.spinlock);
9751 9 : bdev_enable_qos(bdev, bdev_ch);
9752 9 : spdk_spin_unlock(&bdev->internal.spinlock);
9753 9 : spdk_bdev_for_each_channel_continue(i, 0);
9754 9 : }
9755 :
9756 : static void
9757 6 : bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status)
9758 : {
9759 6 : struct set_qos_limit_ctx *ctx = _ctx;
9760 :
9761 6 : bdev_set_qos_limit_done(ctx, status);
9762 6 : }
9763 :
9764 : static void
9765 7 : bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
9766 : {
9767 : int i;
9768 :
9769 7 : assert(bdev->internal.qos != NULL);
9770 :
9771 35 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9772 28 : if (limits[i] != SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9773 28 : bdev->internal.qos->rate_limits[i].limit = limits[i];
9774 :
9775 28 : if (limits[i] == 0) {
9776 19 : bdev->internal.qos->rate_limits[i].limit =
9777 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
9778 19 : }
9779 28 : }
9780 28 : }
9781 7 : }
9782 :
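 : /*
 : * For the public API below, the limits array is indexed in rate limit
 : * type order (IOPS, then total/read/write bandwidth, the latter three
 : * expressed in MB/s). A value of 0 clears a limit and
 : * SPDK_BDEV_QOS_LIMIT_NOT_DEFINED leaves it unchanged. Illustrative
 : * sketch (qos_done is a hypothetical callback):
 : *
 : *   uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {
 : *           10000,                            -- 10k IO/s
 : *           SPDK_BDEV_QOS_LIMIT_NOT_DEFINED,  -- keep total bandwidth limit
 : *           0,                                -- clear read bandwidth limit
 : *           100,                              -- 100 MB/s writes
 : *   };
 : *
 : *   spdk_bdev_set_qos_rate_limits(bdev, limits, qos_done, NULL);
 : */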
9783 : void
9784 9 : spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
9785 : void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
9786 : {
9787 : struct set_qos_limit_ctx *ctx;
9788 : uint32_t limit_set_complement;
9789 : uint64_t min_limit_per_sec;
9790 : int i;
9791 9 : bool disable_rate_limit = true;
9792 :
9793 45 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9794 36 : if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9795 0 : continue;
9796 : }
9797 :
9798 36 : if (limits[i] > 0) {
9799 10 : disable_rate_limit = false;
9800 10 : }
9801 :
9802 36 : if (bdev_qos_is_iops_rate_limit(i) == true) {
9803 9 : min_limit_per_sec = SPDK_BDEV_QOS_MIN_IOS_PER_SEC;
9804 9 : } else {
9805 27 : if (limits[i] > SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC) {
9806 0 : SPDK_WARNLOG("Requested rate limit %" PRIu64 " will result in uint64_t overflow, "
9807 : "reset to %" PRIu64 "\n", limits[i], SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC);
9808 0 : limits[i] = SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC;
9809 0 : }
9810 : /* Change from megabyte to byte rate limit */
9811 27 : limits[i] = limits[i] * 1024 * 1024;
9812 27 : min_limit_per_sec = SPDK_BDEV_QOS_MIN_BYTES_PER_SEC;
9813 : }
9814 :
9815 36 : limit_set_complement = limits[i] % min_limit_per_sec;
9816 36 : if (limit_set_complement) {
9817 0 : SPDK_ERRLOG("Requested rate limit %" PRIu64 " is not a multiple of %" PRIu64 "\n",
9818 : limits[i], min_limit_per_sec);
9819 0 : limits[i] += min_limit_per_sec - limit_set_complement;
9820 0 : SPDK_ERRLOG("Round up the rate limit to %" PRIu64 "\n", limits[i]);
9821 0 : }
9822 36 : }
9823 :
9824 9 : ctx = calloc(1, sizeof(*ctx));
9825 9 : if (ctx == NULL) {
9826 0 : cb_fn(cb_arg, -ENOMEM);
9827 0 : return;
9828 : }
9829 :
9830 9 : ctx->cb_fn = cb_fn;
9831 9 : ctx->cb_arg = cb_arg;
9832 9 : ctx->bdev = bdev;
9833 :
9834 9 : spdk_spin_lock(&bdev->internal.spinlock);
9835 9 : if (bdev->internal.qos_mod_in_progress) {
9836 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9837 1 : free(ctx);
9838 1 : cb_fn(cb_arg, -EAGAIN);
9839 1 : return;
9840 : }
9841 8 : bdev->internal.qos_mod_in_progress = true;
9842 :
9843 8 : if (disable_rate_limit == true && bdev->internal.qos) {
9844 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9845 8 : if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED &&
9846 0 : (bdev->internal.qos->rate_limits[i].limit > 0 &&
9847 0 : bdev->internal.qos->rate_limits[i].limit !=
9848 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED)) {
9849 0 : disable_rate_limit = false;
9850 0 : break;
9851 : }
9852 8 : }
9853 2 : }
9854 :
9855 8 : if (disable_rate_limit == false) {
9856 5 : if (bdev->internal.qos == NULL) {
9857 4 : bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
9858 4 : if (!bdev->internal.qos) {
9859 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9860 0 : SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
9861 0 : bdev_set_qos_limit_done(ctx, -ENOMEM);
9862 0 : return;
9863 : }
9864 4 : }
9865 :
9866 5 : if (bdev->internal.qos->thread == NULL) {
9867 : /* Enabling */
9868 4 : bdev_set_qos_rate_limits(bdev, limits);
9869 :
9870 4 : spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx,
9871 : bdev_enable_qos_done);
9872 4 : } else {
9873 : /* Updating */
9874 1 : bdev_set_qos_rate_limits(bdev, limits);
9875 :
9876 2 : spdk_thread_send_msg(bdev->internal.qos->thread,
9877 1 : bdev_update_qos_rate_limit_msg, ctx);
9878 : }
9879 5 : } else {
9880 3 : if (bdev->internal.qos != NULL) {
9881 2 : bdev_set_qos_rate_limits(bdev, limits);
9882 :
9883 : /* Disabling */
9884 2 : spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx,
9885 : bdev_disable_qos_msg_done);
9886 2 : } else {
9887 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9888 1 : bdev_set_qos_limit_done(ctx, 0);
9889 1 : return;
9890 : }
9891 : }
9892 :
9893 7 : spdk_spin_unlock(&bdev->internal.spinlock);
9894 9 : }
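 :
 : /* Usage sketch (illustrative only, not part of this file): setting QoS
 : * limits from an application thread. Indices follow
 : * enum spdk_bdev_qos_rate_limit_type and byte-based limits are given in
 : * MB/s; a value of 0 disables that limit, while
 : * SPDK_BDEV_QOS_LIMIT_NOT_DEFINED leaves it unchanged. The callback name
 : * below is hypothetical.
 : *
 : * static void
 : * qos_set_done(void *cb_arg, int status)
 : * {
 : *         SPDK_NOTICELOG("QoS update finished: %d\n", status);
 : * }
 : *
 : * uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {
 : *         [SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000,
 : *         [SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT]  = 100,
 : *         [SPDK_BDEV_QOS_R_BPS_RATE_LIMIT]   = 0,
 : *         [SPDK_BDEV_QOS_W_BPS_RATE_LIMIT]   = 0,
 : * };
 : *
 : * spdk_bdev_set_qos_rate_limits(bdev, limits, qos_set_done, NULL);
 : */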
9895 :
9896 : struct spdk_bdev_histogram_ctx {
9897 : spdk_bdev_histogram_status_cb cb_fn;
9898 : void *cb_arg;
9899 : struct spdk_bdev *bdev;
9900 : int status;
9901 : };
9902 :
9903 : static void
9904 2 : bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9905 : {
9906 2 : struct spdk_bdev_histogram_ctx *ctx = _ctx;
9907 :
9908 2 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9909 2 : ctx->bdev->internal.histogram_in_progress = false;
9910 2 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9911 2 : ctx->cb_fn(ctx->cb_arg, ctx->status);
9912 2 : free(ctx);
9913 2 : }
9914 :
9915 : static void
9916 3 : bdev_histogram_disable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9917 : struct spdk_io_channel *_ch, void *_ctx)
9918 : {
9919 3 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9920 :
9921 3 : if (ch->histogram != NULL) {
9922 3 : spdk_histogram_data_free(ch->histogram);
9923 3 : ch->histogram = NULL;
9924 3 : }
9925 3 : spdk_bdev_for_each_channel_continue(i, 0);
9926 3 : }
9927 :
9928 : static void
9929 2 : bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9930 : {
9931 2 : struct spdk_bdev_histogram_ctx *ctx = _ctx;
9932 :
9933 2 : if (status != 0) {
9934 0 : ctx->status = status;
9935 0 : ctx->bdev->internal.histogram_enabled = false;
9936 0 : spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx,
9937 : bdev_histogram_disable_channel_cb);
9938 0 : } else {
9939 2 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9940 2 : ctx->bdev->internal.histogram_in_progress = false;
9941 2 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9942 2 : ctx->cb_fn(ctx->cb_arg, ctx->status);
9943 2 : free(ctx);
9944 : }
9945 2 : }
9946 :
9947 : static void
9948 3 : bdev_histogram_enable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9949 : struct spdk_io_channel *_ch, void *_ctx)
9950 : {
9951 3 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9952 3 : int status = 0;
9953 :
9954 3 : if (ch->histogram == NULL) {
9955 3 : ch->histogram = spdk_histogram_data_alloc();
9956 3 : if (ch->histogram == NULL) {
9957 0 : status = -ENOMEM;
9958 0 : }
9959 3 : }
9960 :
9961 3 : spdk_bdev_for_each_channel_continue(i, status);
9962 3 : }
9963 :
9964 : void
9965 4 : spdk_bdev_histogram_enable_ext(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
9966 : void *cb_arg, bool enable, struct spdk_bdev_enable_histogram_opts *opts)
9967 : {
9968 : struct spdk_bdev_histogram_ctx *ctx;
9969 :
9970 4 : ctx = calloc(1, sizeof(struct spdk_bdev_histogram_ctx));
9971 4 : if (ctx == NULL) {
9972 0 : cb_fn(cb_arg, -ENOMEM);
9973 0 : return;
9974 : }
9975 :
9976 4 : ctx->bdev = bdev;
9977 4 : ctx->status = 0;
9978 4 : ctx->cb_fn = cb_fn;
9979 4 : ctx->cb_arg = cb_arg;
9980 :
9981 4 : spdk_spin_lock(&bdev->internal.spinlock);
9982 4 : if (bdev->internal.histogram_in_progress) {
9983 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9984 0 : free(ctx);
9985 0 : cb_fn(cb_arg, -EAGAIN);
9986 0 : return;
9987 : }
9988 :
9989 4 : bdev->internal.histogram_in_progress = true;
9990 4 : spdk_spin_unlock(&bdev->internal.spinlock);
9991 :
9992 4 : bdev->internal.histogram_enabled = enable;
9993 4 : bdev->internal.histogram_io_type = opts->io_type;
9994 :
9995 4 : if (enable) {
9996 : /* Allocate histogram for each channel */
9997 2 : spdk_bdev_for_each_channel(bdev, bdev_histogram_enable_channel, ctx,
9998 : bdev_histogram_enable_channel_cb);
9999 2 : } else {
10000 2 : spdk_bdev_for_each_channel(bdev, bdev_histogram_disable_channel, ctx,
10001 : bdev_histogram_disable_channel_cb);
10002 : }
10003 4 : }
10004 :
10005 : void
10006 4 : spdk_bdev_enable_histogram_opts_init(struct spdk_bdev_enable_histogram_opts *opts, size_t size)
10007 : {
10008 4 : if (opts == NULL) {
10009 0 : SPDK_ERRLOG("opts should not be NULL\n");
10010 0 : assert(opts != NULL);
10011 0 : return;
10012 : }
10013 4 : if (size == 0) {
10014 0 : SPDK_ERRLOG("size should not be zero\n");
10015 0 : assert(size != 0);
10016 0 : return;
10017 : }
10018 :
10019 4 : memset(opts, 0, size);
10020 4 : opts->size = size;
10021 :
10022 : #define FIELD_OK(field) \
10023 : offsetof(struct spdk_bdev_enable_histogram_opts, field) + sizeof(opts->field) <= size
10024 :
10025 : #define SET_FIELD(field, value) \
10026 : if (FIELD_OK(field)) { \
10027 : opts->field = value; \
10028 : } \
10029 :
10030 4 : SET_FIELD(io_type, 0);
10031 :
 10032 : /* Do not remove this statement. If you add a new field, update the
 10033 : * assert below and add a corresponding SET_FIELD statement. */
10034 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_enable_histogram_opts) == 9, "Incorrect size");
10035 :
10036 : #undef FIELD_OK
10037 : #undef SET_FIELD
10038 4 : }
10039 :
10040 : void
10041 4 : spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
10042 : void *cb_arg, bool enable)
10043 : {
10044 : struct spdk_bdev_enable_histogram_opts opts;
10045 :
10046 4 : spdk_bdev_enable_histogram_opts_init(&opts, sizeof(opts));
10047 4 : spdk_bdev_histogram_enable_ext(bdev, cb_fn, cb_arg, enable, &opts);
10048 4 : }
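 :
 : /* Usage sketch (illustrative only): enabling histograms on every channel
 : * and later collecting the merged data. spdk_histogram_data_alloc()/
 : * spdk_histogram_data_free() come from spdk/histogram_data.h; the
 : * "hist_enabled"/"hist_done" names are hypothetical. The bdev pointer is
 : * passed through cb_arg.
 : *
 : * static void
 : * hist_done(void *cb_arg, int status, struct spdk_histogram_data *h)
 : * {
 : *         if (status == 0) {
 : *                 ... walk h, e.g. with spdk_histogram_data_iterate() ...
 : *         }
 : *         spdk_histogram_data_free(h);
 : * }
 : *
 : * static void
 : * hist_enabled(void *cb_arg, int status)
 : * {
 : *         struct spdk_bdev *bdev = cb_arg;
 : *         struct spdk_histogram_data *h;
 : *
 : *         if (status != 0) {
 : *                 return;
 : *         }
 : *         h = spdk_histogram_data_alloc();
 : *         if (h == NULL) {
 : *                 return;
 : *         }
 : *         spdk_bdev_histogram_get(bdev, h, hist_done, NULL);
 : * }
 : *
 : * spdk_bdev_histogram_enable(bdev, hist_enabled, bdev, true);
 : */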
10049 :
10050 : struct spdk_bdev_histogram_data_ctx {
10051 : spdk_bdev_histogram_data_cb cb_fn;
10052 : void *cb_arg;
10053 : struct spdk_bdev *bdev;
10054 : /** merged histogram data from all channels */
10055 : struct spdk_histogram_data *histogram;
10056 : };
10057 :
10058 : static void
10059 5 : bdev_histogram_get_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
10060 : {
10061 5 : struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
10062 :
10063 5 : ctx->cb_fn(ctx->cb_arg, status, ctx->histogram);
10064 5 : free(ctx);
10065 5 : }
10066 :
10067 : static void
10068 7 : bdev_histogram_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10069 : struct spdk_io_channel *_ch, void *_ctx)
10070 : {
10071 7 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10072 7 : struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
10073 7 : int status = 0;
10074 :
10075 7 : if (ch->histogram == NULL) {
10076 1 : status = -EFAULT;
10077 1 : } else {
10078 6 : spdk_histogram_data_merge(ctx->histogram, ch->histogram);
10079 : }
10080 :
10081 7 : spdk_bdev_for_each_channel_continue(i, status);
10082 7 : }
10083 :
10084 : void
10085 5 : spdk_bdev_histogram_get(struct spdk_bdev *bdev, struct spdk_histogram_data *histogram,
10086 : spdk_bdev_histogram_data_cb cb_fn,
10087 : void *cb_arg)
10088 : {
10089 : struct spdk_bdev_histogram_data_ctx *ctx;
10090 :
10091 5 : ctx = calloc(1, sizeof(struct spdk_bdev_histogram_data_ctx));
10092 5 : if (ctx == NULL) {
10093 0 : cb_fn(cb_arg, -ENOMEM, NULL);
10094 0 : return;
10095 : }
10096 :
10097 5 : ctx->bdev = bdev;
10098 5 : ctx->cb_fn = cb_fn;
10099 5 : ctx->cb_arg = cb_arg;
10100 :
10101 5 : ctx->histogram = histogram;
10102 :
10103 5 : spdk_bdev_for_each_channel(bdev, bdev_histogram_get_channel, ctx,
10104 : bdev_histogram_get_channel_cb);
10105 5 : }
10106 :
10107 : void
10108 2 : spdk_bdev_channel_get_histogram(struct spdk_io_channel *ch, spdk_bdev_histogram_data_cb cb_fn,
10109 : void *cb_arg)
10110 : {
10111 2 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
10112 2 : int status = 0;
10113 :
10114 2 : assert(cb_fn != NULL);
10115 :
10116 2 : if (bdev_ch->histogram == NULL) {
10117 1 : status = -EFAULT;
10118 1 : }
10119 2 : cb_fn(cb_arg, status, bdev_ch->histogram);
10120 2 : }
10121 :
10122 : size_t
10123 0 : spdk_bdev_get_media_events(struct spdk_bdev_desc *desc, struct spdk_bdev_media_event *events,
10124 : size_t max_events)
10125 : {
10126 : struct media_event_entry *entry;
10127 0 : size_t num_events = 0;
10128 :
10129 0 : for (; num_events < max_events; ++num_events) {
10130 0 : entry = TAILQ_FIRST(&desc->pending_media_events);
10131 0 : if (entry == NULL) {
10132 0 : break;
10133 : }
10134 :
10135 0 : events[num_events] = entry->event;
10136 0 : TAILQ_REMOVE(&desc->pending_media_events, entry, tailq);
10137 0 : TAILQ_INSERT_TAIL(&desc->free_media_events, entry, tailq);
10138 0 : }
10139 :
10140 0 : return num_events;
10141 : }
10142 :
10143 : int
10144 0 : spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media_event *events,
10145 : size_t num_events)
10146 : {
10147 : struct spdk_bdev_desc *desc;
10148 : struct media_event_entry *entry;
10149 : size_t event_id;
10150 0 : int rc = 0;
10151 :
10152 0 : assert(bdev->media_events);
10153 :
10154 0 : spdk_spin_lock(&bdev->internal.spinlock);
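 :	/* Queue the events on the first descriptor that was opened for writing. */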
10155 0 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
10156 0 : if (desc->write) {
10157 0 : break;
10158 : }
10159 0 : }
10160 :
10161 0 : if (desc == NULL || desc->media_events_buffer == NULL) {
10162 0 : rc = -ENODEV;
10163 0 : goto out;
10164 : }
10165 :
10166 0 : for (event_id = 0; event_id < num_events; ++event_id) {
10167 0 : entry = TAILQ_FIRST(&desc->free_media_events);
10168 0 : if (entry == NULL) {
10169 0 : break;
10170 : }
10171 :
10172 0 : TAILQ_REMOVE(&desc->free_media_events, entry, tailq);
10173 0 : TAILQ_INSERT_TAIL(&desc->pending_media_events, entry, tailq);
10174 0 : entry->event = events[event_id];
10175 0 : }
10176 :
10177 0 : rc = event_id;
10178 : out:
10179 0 : spdk_spin_unlock(&bdev->internal.spinlock);
10180 0 : return rc;
10181 : }
10182 :
10183 : static void
10184 0 : _media_management_notify(void *arg)
10185 : {
10186 0 : struct spdk_bdev_desc *desc = arg;
10187 :
10188 0 : _event_notify(desc, SPDK_BDEV_EVENT_MEDIA_MANAGEMENT);
10189 0 : }
10190 :
10191 : void
10192 0 : spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
10193 : {
10194 : struct spdk_bdev_desc *desc;
10195 :
10196 0 : spdk_spin_lock(&bdev->internal.spinlock);
10197 0 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
10198 0 : if (!TAILQ_EMPTY(&desc->pending_media_events)) {
10199 0 : event_notify(desc, _media_management_notify);
10200 0 : }
10201 0 : }
10202 0 : spdk_spin_unlock(&bdev->internal.spinlock);
10203 0 : }
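 :
 : /* Usage sketch (illustrative only): a bdev module pushes media events and
 : * then notifies consumers, and a consumer drains the events from the event
 : * callback it registered at spdk_bdev_open_ext() time. Variable names are
 : * hypothetical.
 : *
 : * Producer (module):
 : *         rc = spdk_bdev_push_media_events(bdev, events, num_events);
 : *         if (rc >= 0) {
 : *                 spdk_bdev_notify_media_management(bdev);
 : *         }
 : *
 : * Consumer (in its event callback, on SPDK_BDEV_EVENT_MEDIA_MANAGEMENT):
 : *         struct spdk_bdev_media_event ev[8];
 : *         size_t n = spdk_bdev_get_media_events(desc, ev, SPDK_COUNTOF(ev));
 : *         ... handle the n returned events ...
 : */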
10204 :
10205 : struct locked_lba_range_ctx {
10206 : struct lba_range range;
10207 : struct lba_range *current_range;
10208 : struct lba_range *owner_range;
10209 : struct spdk_poller *poller;
10210 : lock_range_cb cb_fn;
10211 : void *cb_arg;
10212 : };
10213 :
10214 : static void
10215 0 : bdev_lock_error_cleanup_cb(struct spdk_bdev *bdev, void *_ctx, int status)
10216 : {
10217 0 : struct locked_lba_range_ctx *ctx = _ctx;
10218 :
10219 0 : ctx->cb_fn(&ctx->range, ctx->cb_arg, -ENOMEM);
10220 0 : free(ctx);
10221 0 : }
10222 :
10223 : static void bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i,
10224 : struct spdk_bdev *bdev, struct spdk_io_channel *ch, void *_ctx);
10225 :
10226 : static void
10227 14 : bdev_lock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
10228 : {
10229 14 : struct locked_lba_range_ctx *ctx = _ctx;
10230 :
10231 14 : if (status == -ENOMEM) {
10232 : /* One of the channels could not allocate a range object.
10233 : * So we have to go back and clean up any ranges that were
10234 : * allocated successfully before we return error status to
10235 : * the caller. We can reuse the unlock function to do that
10236 : * clean up.
10237 : */
10238 0 : spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
10239 : bdev_lock_error_cleanup_cb);
10240 0 : return;
10241 : }
10242 :
 10243 : /* All channels have locked this range and no I/O overlapping the range
 10244 : * is outstanding. Set the owner_ch for the range object for the
10245 : * locking channel, so that this channel will know that it is allowed
10246 : * to write to this range.
10247 : */
10248 14 : if (ctx->owner_range != NULL) {
10249 10 : ctx->owner_range->owner_ch = ctx->range.owner_ch;
10250 10 : }
10251 :
10252 14 : ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
10253 :
10254 : /* Don't free the ctx here. Its range is in the bdev's global list of
10255 : * locked ranges still, and will be removed and freed when this range
10256 : * is later unlocked.
10257 : */
10258 14 : }
10259 :
10260 : static int
10261 17 : bdev_lock_lba_range_check_io(void *_i)
10262 : {
10263 17 : struct spdk_bdev_channel_iter *i = _i;
10264 17 : struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i->i);
10265 17 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10266 17 : struct locked_lba_range_ctx *ctx = i->ctx;
10267 17 : struct lba_range *range = ctx->current_range;
10268 : struct spdk_bdev_io *bdev_io;
10269 :
10270 17 : spdk_poller_unregister(&ctx->poller);
10271 :
10272 : /* The range is now in the locked_ranges, so no new IO can be submitted to this
 10273 : * range. But we need to wait until all outstanding I/O overlapping with
 10274 : * this range have completed.
10275 : */
10276 18 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
10277 3 : if (bdev_io_range_is_locked(bdev_io, range)) {
10278 2 : ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
10279 2 : return SPDK_POLLER_BUSY;
10280 : }
10281 1 : }
10282 :
10283 15 : spdk_bdev_for_each_channel_continue(i, 0);
10284 15 : return SPDK_POLLER_BUSY;
10285 17 : }
10286 :
10287 : static void
10288 15 : bdev_lock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10289 : struct spdk_io_channel *_ch, void *_ctx)
10290 : {
10291 15 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10292 15 : struct locked_lba_range_ctx *ctx = _ctx;
10293 : struct lba_range *range;
10294 :
10295 16 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10296 1 : if (range->length == ctx->range.length &&
10297 0 : range->offset == ctx->range.offset &&
10298 0 : range->locked_ctx == ctx->range.locked_ctx) {
10299 : /* This range already exists on this channel, so don't add
10300 : * it again. This can happen when a new channel is created
10301 : * while the for_each_channel operation is in progress.
10302 : * Do not check for outstanding I/O in that case, since the
10303 : * range was locked before any I/O could be submitted to the
10304 : * new channel.
10305 : */
10306 0 : spdk_bdev_for_each_channel_continue(i, 0);
10307 0 : return;
10308 : }
10309 1 : }
10310 :
10311 15 : range = calloc(1, sizeof(*range));
10312 15 : if (range == NULL) {
10313 0 : spdk_bdev_for_each_channel_continue(i, -ENOMEM);
10314 0 : return;
10315 : }
10316 :
10317 15 : range->length = ctx->range.length;
10318 15 : range->offset = ctx->range.offset;
10319 15 : range->locked_ctx = ctx->range.locked_ctx;
10320 15 : range->quiesce = ctx->range.quiesce;
10321 15 : ctx->current_range = range;
10322 15 : if (ctx->range.owner_ch == ch) {
10323 : /* This is the range object for the channel that will hold
10324 : * the lock. Store it in the ctx object so that we can easily
10325 : * set its owner_ch after the lock is finally acquired.
10326 : */
10327 10 : ctx->owner_range = range;
10328 10 : }
10329 15 : TAILQ_INSERT_TAIL(&ch->locked_ranges, range, tailq);
10330 15 : bdev_lock_lba_range_check_io(i);
10331 15 : }
10332 :
10333 : static void
10334 14 : bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx)
10335 : {
10336 14 : assert(spdk_get_thread() == ctx->range.owner_thread);
10337 14 : assert(ctx->range.owner_ch == NULL ||
10338 : spdk_io_channel_get_thread(ctx->range.owner_ch->channel) == ctx->range.owner_thread);
10339 :
10340 : /* We will add a copy of this range to each channel now. */
10341 14 : spdk_bdev_for_each_channel(bdev, bdev_lock_lba_range_get_channel, ctx,
10342 : bdev_lock_lba_range_cb);
10343 14 : }
10344 :
10345 : static bool
10346 17 : bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
10347 : {
10348 : struct lba_range *r;
10349 :
10350 18 : TAILQ_FOREACH(r, tailq, tailq) {
10351 4 : if (bdev_lba_range_overlapped(range, r)) {
10352 3 : return true;
10353 : }
10354 1 : }
10355 14 : return false;
10356 17 : }
10357 :
10358 : static void bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status);
10359 :
10360 : static int
10361 14 : _bdev_lock_lba_range(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch,
10362 : uint64_t offset, uint64_t length,
10363 : lock_range_cb cb_fn, void *cb_arg)
10364 : {
10365 : struct locked_lba_range_ctx *ctx;
10366 :
10367 14 : ctx = calloc(1, sizeof(*ctx));
10368 14 : if (ctx == NULL) {
10369 0 : return -ENOMEM;
10370 : }
10371 :
10372 14 : ctx->range.offset = offset;
10373 14 : ctx->range.length = length;
10374 14 : ctx->range.owner_thread = spdk_get_thread();
10375 14 : ctx->range.owner_ch = ch;
10376 14 : ctx->range.locked_ctx = cb_arg;
10377 14 : ctx->range.bdev = bdev;
10378 14 : ctx->range.quiesce = (cb_fn == bdev_quiesce_range_locked);
10379 14 : ctx->cb_fn = cb_fn;
10380 14 : ctx->cb_arg = cb_arg;
10381 :
10382 14 : spdk_spin_lock(&bdev->internal.spinlock);
10383 14 : if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
10384 : /* There is an active lock overlapping with this range.
10385 : * Put it on the pending list until this range no
10386 : * longer overlaps with another.
10387 : */
10388 2 : TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
10389 2 : } else {
10390 12 : TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
10391 12 : bdev_lock_lba_range_ctx(bdev, ctx);
10392 : }
10393 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10394 14 : return 0;
10395 14 : }
10396 :
10397 : static int
10398 10 : bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
10399 : uint64_t offset, uint64_t length,
10400 : lock_range_cb cb_fn, void *cb_arg)
10401 : {
10402 10 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10403 10 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10404 :
10405 10 : if (cb_arg == NULL) {
10406 0 : SPDK_ERRLOG("cb_arg must not be NULL\n");
10407 0 : return -EINVAL;
10408 : }
10409 :
10410 10 : return _bdev_lock_lba_range(bdev, ch, offset, length, cb_fn, cb_arg);
10411 10 : }
10412 :
10413 : static void
10414 2 : bdev_lock_lba_range_ctx_msg(void *_ctx)
10415 : {
10416 2 : struct locked_lba_range_ctx *ctx = _ctx;
10417 :
10418 2 : bdev_lock_lba_range_ctx(ctx->range.bdev, ctx);
10419 2 : }
10420 :
10421 : static void
10422 14 : bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
10423 : {
10424 14 : struct locked_lba_range_ctx *ctx = _ctx;
10425 : struct locked_lba_range_ctx *pending_ctx;
10426 : struct lba_range *range, *tmp;
10427 :
10428 14 : spdk_spin_lock(&bdev->internal.spinlock);
 10429 : /* Check if there are any pending locked ranges that overlap with the range
 10430 : * that was just unlocked. Any such range that no longer overlaps with any
 10431 : * other locked range is moved to the locked list, and bdev_lock_lba_range_ctx
 10432 : * is invoked on its owner thread to start the lock process.
10433 : */
10434 17 : TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
10435 3 : if (bdev_lba_range_overlapped(range, &ctx->range) &&
10436 3 : !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
10437 2 : TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
10438 2 : pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
10439 2 : TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
10440 4 : spdk_thread_send_msg(pending_ctx->range.owner_thread,
10441 2 : bdev_lock_lba_range_ctx_msg, pending_ctx);
10442 2 : }
10443 3 : }
10444 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10445 :
10446 14 : ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
10447 14 : free(ctx);
10448 14 : }
10449 :
10450 : static void
10451 16 : bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10452 : struct spdk_io_channel *_ch, void *_ctx)
10453 : {
10454 16 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10455 16 : struct locked_lba_range_ctx *ctx = _ctx;
10456 : TAILQ_HEAD(, spdk_bdev_io) io_locked;
10457 : struct spdk_bdev_io *bdev_io;
10458 : struct lba_range *range;
10459 :
10460 16 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10461 32 : if (ctx->range.offset == range->offset &&
10462 16 : ctx->range.length == range->length &&
10463 16 : ctx->range.locked_ctx == range->locked_ctx) {
10464 16 : TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
10465 16 : free(range);
10466 16 : break;
10467 : }
10468 0 : }
10469 :
10470 : /* Note: we should almost always be able to assert that the range specified
10471 : * was found. But there are some very rare corner cases where a new channel
10472 : * gets created simultaneously with a range unlock, where this function
10473 : * would execute on that new channel and wouldn't have the range.
10474 : * We also use this to clean up range allocations when a later allocation
10475 : * fails in the locking path.
10476 : * So we can't actually assert() here.
10477 : */
10478 :
10479 : /* Swap the locked IO into a temporary list, and then try to submit them again.
10480 : * We could hyper-optimize this to only resubmit locked I/O that overlap
10481 : * with the range that was just unlocked, but this isn't a performance path so
10482 : * we go for simplicity here.
10483 : */
10484 16 : TAILQ_INIT(&io_locked);
10485 16 : TAILQ_SWAP(&ch->io_locked, &io_locked, spdk_bdev_io, internal.ch_link);
10486 19 : while (!TAILQ_EMPTY(&io_locked)) {
10487 3 : bdev_io = TAILQ_FIRST(&io_locked);
10488 3 : TAILQ_REMOVE(&io_locked, bdev_io, internal.ch_link);
10489 3 : bdev_io_submit(bdev_io);
10490 : }
10491 :
10492 16 : spdk_bdev_for_each_channel_continue(i, 0);
10493 16 : }
10494 :
10495 : static int
10496 14 : _bdev_unlock_lba_range(struct spdk_bdev *bdev, uint64_t offset, uint64_t length,
10497 : lock_range_cb cb_fn, void *cb_arg)
10498 : {
10499 : struct locked_lba_range_ctx *ctx;
10500 : struct lba_range *range;
10501 :
10502 14 : spdk_spin_lock(&bdev->internal.spinlock);
 10503 : /* To start the unlock process, we find the range in the bdev's locked_ranges
10504 : * and remove it. This ensures new channels don't inherit the locked range.
10505 : * Then we will send a message to each channel to remove the range from its
10506 : * per-channel list.
10507 : */
10508 14 : TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
10509 24 : if (range->offset == offset && range->length == length &&
10510 14 : (range->owner_ch == NULL || range->locked_ctx == cb_arg)) {
10511 14 : break;
10512 : }
10513 0 : }
10514 14 : if (range == NULL) {
10515 0 : assert(false);
10516 : spdk_spin_unlock(&bdev->internal.spinlock);
10517 : return -EINVAL;
10518 : }
10519 14 : TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
10520 14 : ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
10521 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10522 :
10523 14 : ctx->cb_fn = cb_fn;
10524 14 : ctx->cb_arg = cb_arg;
10525 :
10526 14 : spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
10527 : bdev_unlock_lba_range_cb);
10528 14 : return 0;
10529 : }
10530 :
10531 : static int
10532 12 : bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
10533 : uint64_t offset, uint64_t length,
10534 : lock_range_cb cb_fn, void *cb_arg)
10535 : {
10536 12 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10537 12 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10538 : struct lba_range *range;
10539 12 : bool range_found = false;
10540 :
10541 : /* Let's make sure the specified channel actually has a lock on
10542 : * the specified range. Note that the range must match exactly.
10543 : */
10544 14 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10545 22 : if (range->offset == offset && range->length == length &&
10546 11 : range->owner_ch == ch && range->locked_ctx == cb_arg) {
10547 10 : range_found = true;
10548 10 : break;
10549 : }
10550 2 : }
10551 :
10552 12 : if (!range_found) {
10553 2 : return -EINVAL;
10554 : }
10555 :
10556 10 : return _bdev_unlock_lba_range(bdev, offset, length, cb_fn, cb_arg);
10557 12 : }
10558 :
10559 : struct bdev_quiesce_ctx {
10560 : spdk_bdev_quiesce_cb cb_fn;
10561 : void *cb_arg;
10562 : };
10563 :
10564 : static void
10565 4 : bdev_unquiesce_range_unlocked(struct lba_range *range, void *ctx, int status)
10566 : {
10567 4 : struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10568 :
10569 4 : if (quiesce_ctx->cb_fn != NULL) {
10570 4 : quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10571 4 : }
10572 :
10573 4 : free(quiesce_ctx);
10574 4 : }
10575 :
10576 : static void
10577 4 : bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status)
10578 : {
10579 4 : struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10580 4 : struct spdk_bdev_module *module = range->bdev->module;
10581 :
10582 4 : if (status != 0) {
10583 0 : if (quiesce_ctx->cb_fn != NULL) {
10584 0 : quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10585 0 : }
10586 0 : free(quiesce_ctx);
10587 0 : return;
10588 : }
10589 :
10590 4 : spdk_spin_lock(&module->internal.spinlock);
10591 4 : TAILQ_INSERT_TAIL(&module->internal.quiesced_ranges, range, tailq_module);
10592 4 : spdk_spin_unlock(&module->internal.spinlock);
10593 :
10594 4 : if (quiesce_ctx->cb_fn != NULL) {
10595 : /* copy the context in case the range is unlocked by the callback */
10596 4 : struct bdev_quiesce_ctx tmp = *quiesce_ctx;
10597 :
10598 4 : quiesce_ctx->cb_fn = NULL;
10599 4 : quiesce_ctx->cb_arg = NULL;
10600 :
10601 4 : tmp.cb_fn(tmp.cb_arg, status);
10602 4 : }
10603 : /* quiesce_ctx will be freed on unquiesce */
10604 4 : }
10605 :
10606 : static int
10607 9 : _spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10608 : uint64_t offset, uint64_t length,
10609 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg,
10610 : bool unquiesce)
10611 : {
10612 : struct bdev_quiesce_ctx *quiesce_ctx;
10613 : int rc;
10614 :
10615 9 : if (module != bdev->module) {
10616 0 : SPDK_ERRLOG("Bdev does not belong to specified module.\n");
10617 0 : return -EINVAL;
10618 : }
10619 :
10620 9 : if (!bdev_io_valid_blocks(bdev, offset, length)) {
10621 0 : return -EINVAL;
10622 : }
10623 :
10624 9 : if (unquiesce) {
10625 : struct lba_range *range;
10626 :
10627 : /* Make sure the specified range is actually quiesced in the specified module and
10628 : * then remove it from the list. Note that the range must match exactly.
10629 : */
10630 5 : spdk_spin_lock(&module->internal.spinlock);
10631 6 : TAILQ_FOREACH(range, &module->internal.quiesced_ranges, tailq_module) {
10632 5 : if (range->bdev == bdev && range->offset == offset && range->length == length) {
10633 4 : TAILQ_REMOVE(&module->internal.quiesced_ranges, range, tailq_module);
10634 4 : break;
10635 : }
10636 1 : }
10637 5 : spdk_spin_unlock(&module->internal.spinlock);
10638 :
10639 5 : if (range == NULL) {
10640 1 : SPDK_ERRLOG("The range to unquiesce was not found.\n");
10641 1 : return -EINVAL;
10642 : }
10643 :
10644 4 : quiesce_ctx = range->locked_ctx;
10645 4 : quiesce_ctx->cb_fn = cb_fn;
10646 4 : quiesce_ctx->cb_arg = cb_arg;
10647 :
10648 4 : rc = _bdev_unlock_lba_range(bdev, offset, length, bdev_unquiesce_range_unlocked, quiesce_ctx);
10649 4 : } else {
10650 4 : quiesce_ctx = malloc(sizeof(*quiesce_ctx));
10651 4 : if (quiesce_ctx == NULL) {
10652 0 : return -ENOMEM;
10653 : }
10654 :
10655 4 : quiesce_ctx->cb_fn = cb_fn;
10656 4 : quiesce_ctx->cb_arg = cb_arg;
10657 :
10658 4 : rc = _bdev_lock_lba_range(bdev, NULL, offset, length, bdev_quiesce_range_locked, quiesce_ctx);
10659 4 : if (rc != 0) {
10660 0 : free(quiesce_ctx);
10661 0 : }
10662 : }
10663 :
10664 8 : return rc;
10665 9 : }
10666 :
10667 : int
10668 3 : spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10669 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10670 : {
10671 3 : return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, false);
10672 : }
10673 :
10674 : int
10675 3 : spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10676 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10677 : {
10678 3 : return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, true);
10679 : }
10680 :
10681 : int
10682 1 : spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10683 : uint64_t offset, uint64_t length,
10684 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10685 : {
10686 1 : return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, false);
10687 : }
10688 :
10689 : int
10690 2 : spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10691 : uint64_t offset, uint64_t length,
10692 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10693 : {
10694 2 : return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, true);
10695 : }
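 :
 : /* Usage sketch (illustrative only): a bdev module quiescing its whole bdev
 : * before an internal metadata update, then resuming I/O. "module" must be
 : * the bdev's owning module; the callback names are hypothetical.
 : *
 : * static void
 : * quiesce_done(void *cb_arg, int status)
 : * {
 : *         ... no I/O is in flight on the bdev until unquiesce ...
 : * }
 : *
 : * rc = spdk_bdev_quiesce(bdev, module, quiesce_done, NULL);
 : * ...
 : * rc = spdk_bdev_unquiesce(bdev, module, unquiesce_done, NULL);
 : */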
10696 :
10697 : int
10698 283 : spdk_bdev_get_memory_domains(struct spdk_bdev *bdev, struct spdk_memory_domain **domains,
10699 : int array_size)
10700 : {
10701 283 : if (!bdev) {
10702 1 : return -EINVAL;
10703 : }
10704 :
10705 282 : if (bdev->fn_table->get_memory_domains) {
10706 3 : return bdev->fn_table->get_memory_domains(bdev->ctxt, domains, array_size);
10707 : }
10708 :
10709 279 : return 0;
10710 283 : }
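 :
 : /* Usage sketch (illustrative only): the two-call pattern for querying
 : * memory domains. My reading of the API is that the return value is the
 : * number of domains the bdev uses, which may exceed array_size, so a
 : * first call with a NULL array sizes the allocation.
 : *
 : * int cnt = spdk_bdev_get_memory_domains(bdev, NULL, 0);
 : * if (cnt > 0) {
 : *         struct spdk_memory_domain **domains = calloc(cnt, sizeof(*domains));
 : *
 : *         cnt = spdk_bdev_get_memory_domains(bdev, domains, cnt);
 : *         ... use domains[0..cnt), then free(domains) ...
 : * }
 : */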
10711 :
10712 : struct spdk_bdev_for_each_io_ctx {
10713 : void *ctx;
10714 : spdk_bdev_io_fn fn;
10715 : spdk_bdev_for_each_io_cb cb;
10716 : };
10717 :
10718 : static void
10719 0 : bdev_channel_for_each_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10720 : struct spdk_io_channel *io_ch, void *_ctx)
10721 : {
10722 0 : struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10723 0 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
10724 : struct spdk_bdev_io *bdev_io;
10725 0 : int rc = 0;
10726 :
10727 0 : TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
10728 0 : rc = ctx->fn(ctx->ctx, bdev_io);
10729 0 : if (rc != 0) {
10730 0 : break;
10731 : }
10732 0 : }
10733 :
10734 0 : spdk_bdev_for_each_channel_continue(i, rc);
10735 0 : }
10736 :
10737 : static void
10738 0 : bdev_for_each_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
10739 : {
10740 0 : struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10741 :
10742 0 : ctx->cb(ctx->ctx, status);
10743 :
10744 0 : free(ctx);
10745 0 : }
10746 :
10747 : void
10748 0 : spdk_bdev_for_each_bdev_io(struct spdk_bdev *bdev, void *_ctx, spdk_bdev_io_fn fn,
10749 : spdk_bdev_for_each_io_cb cb)
10750 : {
10751 : struct spdk_bdev_for_each_io_ctx *ctx;
10752 :
10753 0 : assert(fn != NULL && cb != NULL);
10754 :
10755 0 : ctx = calloc(1, sizeof(*ctx));
10756 0 : if (ctx == NULL) {
10757 0 : SPDK_ERRLOG("Failed to allocate context.\n");
10758 0 : cb(_ctx, -ENOMEM);
10759 0 : return;
10760 : }
10761 :
10762 0 : ctx->ctx = _ctx;
10763 0 : ctx->fn = fn;
10764 0 : ctx->cb = cb;
10765 :
10766 0 : spdk_bdev_for_each_channel(bdev, bdev_channel_for_each_io, ctx,
10767 : bdev_for_each_io_done);
10768 0 : }
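 :
 : /* Usage sketch (illustrative only): counting outstanding I/O larger than
 : * one block across all channels. Returning non-zero from the per-I/O
 : * function stops the scan, as seen in bdev_channel_for_each_io() above.
 : * Names are hypothetical.
 : *
 : * static int
 : * count_large_io(void *ctx, struct spdk_bdev_io *bdev_io)
 : * {
 : *         uint64_t *count = ctx;
 : *
 : *         if (bdev_io->u.bdev.num_blocks > 1) {
 : *                 (*count)++;
 : *         }
 : *         return 0;
 : * }
 : *
 : * static void
 : * count_done(void *ctx, int status)
 : * {
 : *         ... *count is final once status is reported ...
 : * }
 : *
 : * spdk_bdev_for_each_bdev_io(bdev, &count, count_large_io, count_done);
 : */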
10769 :
10770 : void
10771 135 : spdk_bdev_for_each_channel_continue(struct spdk_bdev_channel_iter *iter, int status)
10772 : {
10773 135 : spdk_for_each_channel_continue(iter->i, status);
10774 135 : }
10775 :
10776 : static struct spdk_bdev *
10777 371 : io_channel_iter_get_bdev(struct spdk_io_channel_iter *i)
10778 : {
10779 371 : void *io_device = spdk_io_channel_iter_get_io_device(i);
10780 :
10781 371 : return __bdev_from_io_dev(io_device);
10782 : }
10783 :
10784 : static void
10785 135 : bdev_each_channel_msg(struct spdk_io_channel_iter *i)
10786 : {
10787 135 : struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10788 135 : struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10789 135 : struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
10790 :
10791 135 : iter->i = i;
10792 135 : iter->fn(iter, bdev, ch, iter->ctx);
10793 135 : }
10794 :
10795 : static void
10796 236 : bdev_each_channel_cpl(struct spdk_io_channel_iter *i, int status)
10797 : {
10798 236 : struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10799 236 : struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10800 :
10801 236 : iter->i = i;
10802 236 : iter->cpl(bdev, iter->ctx, status);
10803 :
10804 236 : free(iter);
10805 236 : }
10806 :
10807 : void
10808 236 : spdk_bdev_for_each_channel(struct spdk_bdev *bdev, spdk_bdev_for_each_channel_msg fn,
10809 : void *ctx, spdk_bdev_for_each_channel_done cpl)
10810 : {
10811 : struct spdk_bdev_channel_iter *iter;
10812 :
10813 236 : assert(bdev != NULL && fn != NULL && ctx != NULL);
10814 :
10815 236 : iter = calloc(1, sizeof(struct spdk_bdev_channel_iter));
10816 236 : if (iter == NULL) {
10817 0 : SPDK_ERRLOG("Unable to allocate iterator\n");
10818 0 : assert(false);
10819 : return;
10820 : }
10821 :
10822 236 : iter->fn = fn;
10823 236 : iter->cpl = cpl;
10824 236 : iter->ctx = ctx;
10825 :
10826 472 : spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_each_channel_msg,
10827 236 : iter, bdev_each_channel_cpl);
10828 236 : }
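 :
 : /* Usage sketch (illustrative only): the general pattern used throughout
 : * this file. The per-channel function runs on each channel's thread and
 : * must call spdk_bdev_for_each_channel_continue(); the completion callback
 : * runs on the initiating thread once every channel has been visited.
 : * Names are hypothetical.
 : *
 : * static void
 : * visit_one_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
 : *                   struct spdk_io_channel *ch, void *ctx)
 : * {
 : *         ... per-channel work ...
 : *         spdk_bdev_for_each_channel_continue(i, 0);
 : * }
 : *
 : * static void
 : * visit_done(struct spdk_bdev *bdev, void *ctx, int status)
 : * {
 : *         ... status is the first non-zero value passed to continue ...
 : * }
 : *
 : * spdk_bdev_for_each_channel(bdev, visit_one_channel, ctx, visit_done);
 : */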
10829 :
10830 : static void
10831 3 : bdev_copy_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10832 : {
10833 3 : struct spdk_bdev_io *parent_io = cb_arg;
10834 :
10835 3 : spdk_bdev_free_io(bdev_io);
10836 :
10837 : /* Check return status of write */
10838 3 : parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
10839 3 : parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
10840 3 : }
10841 :
10842 : static void
10843 3 : bdev_copy_do_write(void *_bdev_io)
10844 : {
10845 3 : struct spdk_bdev_io *bdev_io = _bdev_io;
10846 : int rc;
10847 :
10848 : /* Write blocks */
10849 6 : rc = spdk_bdev_write_blocks_with_md(bdev_io->internal.desc,
10850 3 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
10851 3 : bdev_io->u.bdev.iovs[0].iov_base,
10852 3 : bdev_io->u.bdev.md_buf, bdev_io->u.bdev.offset_blocks,
10853 3 : bdev_io->u.bdev.num_blocks, bdev_copy_do_write_done, bdev_io);
10854 :
10855 3 : if (rc == -ENOMEM) {
10856 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_write);
10857 3 : } else if (rc != 0) {
10858 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10859 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10860 0 : }
10861 3 : }
10862 :
10863 : static void
10864 3 : bdev_copy_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10865 : {
10866 3 : struct spdk_bdev_io *parent_io = cb_arg;
10867 :
10868 3 : spdk_bdev_free_io(bdev_io);
10869 :
10870 : /* Check return status of read */
10871 3 : if (!success) {
10872 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10873 0 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
10874 0 : return;
10875 : }
10876 :
10877 : /* Do write */
10878 3 : bdev_copy_do_write(parent_io);
10879 3 : }
10880 :
10881 : static void
10882 3 : bdev_copy_do_read(void *_bdev_io)
10883 : {
10884 3 : struct spdk_bdev_io *bdev_io = _bdev_io;
10885 : int rc;
10886 :
10887 : /* Read blocks */
10888 6 : rc = spdk_bdev_read_blocks_with_md(bdev_io->internal.desc,
10889 3 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
10890 3 : bdev_io->u.bdev.iovs[0].iov_base,
10891 3 : bdev_io->u.bdev.md_buf, bdev_io->u.bdev.copy.src_offset_blocks,
10892 3 : bdev_io->u.bdev.num_blocks, bdev_copy_do_read_done, bdev_io);
10893 :
10894 3 : if (rc == -ENOMEM) {
10895 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_read);
10896 3 : } else if (rc != 0) {
10897 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10898 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10899 0 : }
10900 3 : }
10901 :
10902 : static void
10903 3 : bdev_copy_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
10904 : {
10905 3 : if (!success) {
10906 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10907 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10908 0 : return;
10909 : }
10910 :
10911 3 : bdev_copy_do_read(bdev_io);
10912 3 : }
10913 :
10914 : int
10915 27 : spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
10916 : uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
10917 : spdk_bdev_io_completion_cb cb, void *cb_arg)
10918 : {
10919 27 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10920 : struct spdk_bdev_io *bdev_io;
10921 27 : struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
10922 :
10923 27 : if (!desc->write) {
10924 0 : return -EBADF;
10925 : }
10926 :
10927 27 : if (!bdev_io_valid_blocks(bdev, dst_offset_blocks, num_blocks) ||
10928 27 : !bdev_io_valid_blocks(bdev, src_offset_blocks, num_blocks)) {
10929 0 : SPDK_DEBUGLOG(bdev,
10930 : "Invalid offset or number of blocks: dst %lu, src %lu, count %lu\n",
10931 : dst_offset_blocks, src_offset_blocks, num_blocks);
10932 0 : return -EINVAL;
10933 : }
10934 :
10935 27 : bdev_io = bdev_channel_get_io(channel);
10936 27 : if (!bdev_io) {
10937 0 : return -ENOMEM;
10938 : }
10939 :
10940 27 : bdev_io->internal.ch = channel;
10941 27 : bdev_io->internal.desc = desc;
10942 27 : bdev_io->type = SPDK_BDEV_IO_TYPE_COPY;
10943 :
10944 27 : bdev_io->u.bdev.offset_blocks = dst_offset_blocks;
10945 27 : bdev_io->u.bdev.copy.src_offset_blocks = src_offset_blocks;
10946 27 : bdev_io->u.bdev.num_blocks = num_blocks;
10947 27 : bdev_io->u.bdev.memory_domain = NULL;
10948 27 : bdev_io->u.bdev.memory_domain_ctx = NULL;
10949 27 : bdev_io->u.bdev.iovs = NULL;
10950 27 : bdev_io->u.bdev.iovcnt = 0;
10951 27 : bdev_io->u.bdev.md_buf = NULL;
10952 27 : bdev_io->u.bdev.accel_sequence = NULL;
10953 27 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
10954 :
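 :	/* A copy whose source and destination coincide, or whose length is zero,
 :	 * is a no-op: complete it immediately without submitting any I/O. */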
10955 27 : if (dst_offset_blocks == src_offset_blocks || num_blocks == 0) {
10956 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
10957 0 : return 0;
10958 : }
10959 :
10960 :
10961 : /* If the copy size is large and should be split, use the generic split logic
10962 : * regardless of whether SPDK_BDEV_IO_TYPE_COPY is supported or not.
10963 : *
10964 : * Then, send the copy request if SPDK_BDEV_IO_TYPE_COPY is supported or
10965 : * emulate it using regular read and write requests otherwise.
10966 : */
10967 27 : if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY) ||
10968 4 : bdev_io->internal.f.split) {
10969 24 : bdev_io_submit(bdev_io);
10970 24 : return 0;
10971 : }
10972 :
10973 3 : spdk_bdev_io_get_buf(bdev_io, bdev_copy_get_buf_cb, num_blocks * spdk_bdev_get_block_size(bdev));
10974 :
10975 3 : return 0;
10976 27 : }
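 :
 : /* Usage sketch (illustrative only): copying 16 blocks within one bdev,
 : * from block 0 to block 1024. The callback name is hypothetical; the
 : * "success" flag is checked as with any other bdev I/O completion.
 : *
 : * static void
 : * copy_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 : * {
 : *         spdk_bdev_free_io(bdev_io);
 : *         ... record success/failure ...
 : * }
 : *
 : * rc = spdk_bdev_copy_blocks(desc, io_ch, 1024, 0, 16, copy_done, NULL);
 : * if (rc == -ENOMEM) {
 : *         ... retry later, e.g. via spdk_bdev_queue_io_wait() ...
 : * }
 : */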
10977 :
10978 3 : SPDK_LOG_REGISTER_COMPONENT(bdev)
10979 :
10980 : static void
10981 0 : bdev_trace(void)
10982 : {
10983 0 : struct spdk_trace_tpoint_opts opts[] = {
10984 : {
10985 : "BDEV_IO_START", TRACE_BDEV_IO_START,
10986 : OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 1,
10987 : {
10988 : { "type", SPDK_TRACE_ARG_TYPE_INT, 8 },
10989 : { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10990 : { "offset", SPDK_TRACE_ARG_TYPE_INT, 8 },
10991 : { "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
10992 : }
10993 : },
10994 : {
10995 : "BDEV_IO_DONE", TRACE_BDEV_IO_DONE,
10996 : OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 0,
10997 : {
10998 : { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10999 : { "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
11000 : }
11001 : },
11002 : {
11003 : "BDEV_IOCH_CREATE", TRACE_BDEV_IOCH_CREATE,
11004 : OWNER_TYPE_BDEV, OBJECT_NONE, 0,
11005 : {
11006 : { "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
11007 : }
11008 : },
11009 : {
11010 : "BDEV_IOCH_DESTROY", TRACE_BDEV_IOCH_DESTROY,
11011 : OWNER_TYPE_BDEV, OBJECT_NONE, 0,
11012 : {
11013 : { "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
11014 : }
11015 : },
11016 : };
11017 :
11018 :
11019 0 : spdk_trace_register_owner_type(OWNER_TYPE_BDEV, 'b');
11020 0 : spdk_trace_register_object(OBJECT_BDEV_IO, 'i');
11021 0 : spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
11022 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_START, OBJECT_BDEV_IO, 0);
11023 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_DONE, OBJECT_BDEV_IO, 0);
11024 0 : spdk_trace_tpoint_register_relation(TRACE_BLOB_REQ_SET_START, OBJECT_BDEV_IO, 0);
11025 0 : spdk_trace_tpoint_register_relation(TRACE_BLOB_REQ_SET_COMPLETE, OBJECT_BDEV_IO, 0);
11026 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_RAID_IO_START, OBJECT_BDEV_IO, 0);
11027 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_RAID_IO_DONE, OBJECT_BDEV_IO, 0);
11028 0 : }
11029 3 : SPDK_TRACE_REGISTER_FN(bdev_trace, "bdev", TRACE_GROUP_BDEV)