Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : : */
6 : :
7 : : #include "spdk_internal/cunit.h"
8 : :
9 : : #include "common/lib/ut_multithread.c"
10 : : #include "unit/lib/json_mock.c"
11 : :
12 : : #include "spdk/config.h"
13 : : /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 : : #undef SPDK_CONFIG_VTUNE
15 : :
16 : : #include "bdev/bdev.c"
17 : :
/* Stubs for external dependencies of bdev.c that the tests never exercise
 * beyond their return values. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

/* Flags recording whether the memory-domain mocks below were invoked. */
static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
/* Dummy io_device used as the registration tag for the accel channel. */
static int g_accel_io_device;
38 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
/*
 * Mock of spdk_memory_domain_pull_data(): records the call, honors a mocked
 * return value if the test set one (HANDLE_RETURN_MOCK may return early, in
 * which case the completion callback is NOT invoked), and otherwise completes
 * synchronously with status 0.
 */
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
		spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
50 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
/*
 * Mock of spdk_memory_domain_push_data(): same pattern as the pull mock —
 * record the call, optionally fail via the mocked return value (skipping the
 * completion callback), otherwise complete synchronously with status 0.
 */
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
		spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
62 : :
/* Hand out a channel for the locally registered dummy accel io_device. */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

/* Shared state written by callbacks and inspected by the test cases. */
int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;
78 : :
/* Stub: bdev.c references this symbol; no test inspects the translation. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

/* Accel channel create callback — the dummy accel device has no per-channel
 * state. */
static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

/* Accel channel destroy callback — nothing to tear down. */
static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}

/* CUnit suite setup: register the dummy accel io_device. */
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}

/* CUnit suite teardown: unregister the dummy accel io_device. */
static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}

/* bdev destruct callback for the stub bdevs; memory is freed by the tests. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
117 : :
/*
 * Description of one I/O the stub bdev expects to receive next.  Entries are
 * queued on the channel's expected_io list and checked in FIFO order by
 * stub_submit_request().
 */
struct ut_expected_io {
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t offset;	/* expected offset_blocks */
	uint64_t src_offset;	/* expected copy source offset (COPY only) */
	uint64_t length;	/* expected num_blocks; 0 skips offset/length checks */
	int iovcnt;		/* expected iov count; 0 means no iovs to verify */
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;		/* expected metadata buffer, when non-NULL */
	TAILQ_ENTRY(ut_expected_io) link;
};

/* Per-channel context of the stub bdev module. */
struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;	/* submitted, not yet completed */
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;	/* FIFO of expectations */
};

/* Global knobs and captured results shared between the stub and the tests. */
static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;	/* data returned for READ / compared for COMPARE */
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;	/* captures data of the next WRITE */
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;	/* offset reported for SEEK_DATA */
static uint64_t g_seek_hole_offset;	/* offset reported for SEEK_HOLE */
static uint64_t g_seek_offset;		/* offset captured by bdev_seek_cb */
156 : :
157 : : static struct ut_expected_io *
158 : 1028 : ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
159 : : {
160 : : struct ut_expected_io *expected_io;
161 : :
162 : 1028 : expected_io = calloc(1, sizeof(*expected_io));
163 [ - + ]: 1028 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
164 : :
165 : 1028 : expected_io->type = type;
166 : 1028 : expected_io->offset = offset;
167 : 1028 : expected_io->length = length;
168 : 1028 : expected_io->iovcnt = iovcnt;
169 : :
170 : 1028 : return expected_io;
171 : : }
172 : :
173 : : static struct ut_expected_io *
174 : 84 : ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
175 : : {
176 : : struct ut_expected_io *expected_io;
177 : :
178 : 84 : expected_io = calloc(1, sizeof(*expected_io));
179 [ - + ]: 84 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
180 : :
181 : 84 : expected_io->type = type;
182 : 84 : expected_io->offset = offset;
183 : 84 : expected_io->src_offset = src_offset;
184 : 84 : expected_io->length = length;
185 : :
186 : 84 : return expected_io;
187 : : }
188 : :
189 : : static void
190 : 2184 : ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
191 : : {
192 : 2184 : expected_io->iov[pos].iov_base = base;
193 : 2184 : expected_io->iov[pos].iov_len = len;
194 : 2184 : }
195 : :
196 : : static void
197 : 1596 : stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
198 : : {
199 : 1596 : struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
200 : : struct ut_expected_io *expected_io;
201 : : struct iovec *iov, *expected_iov;
202 : : struct spdk_bdev_io *bio_to_abort;
203 : : int i;
204 : :
205 : 1596 : g_bdev_io = bdev_io;
206 : :
207 [ + + + + ]: 1596 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
208 : 44 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
209 : :
210 : 44 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
211 : 44 : CU_ASSERT(g_compare_read_buf_len == len);
212 [ - + - + ]: 44 : memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
213 [ + + + + : 44 : if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
+ - ]
214 [ - + - + ]: 12 : memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
215 : 12 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
216 : : }
217 : : }
218 : :
219 [ + + + + ]: 1596 : if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
220 : 4 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
221 : :
222 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
223 : 4 : CU_ASSERT(g_compare_write_buf_len == len);
224 [ - + - + ]: 4 : memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
225 : : }
226 : :
227 [ + + + + ]: 1596 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
228 : 36 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
229 : :
230 : 36 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
231 : 36 : CU_ASSERT(g_compare_read_buf_len == len);
232 [ + + - + : 36 : if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
+ + ]
233 : 16 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
234 : : }
235 [ + + ]: 36 : if (bdev_io->u.bdev.md_buf &&
236 [ - + - + : 12 : memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
+ + ]
237 [ + + ]: 12 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
238 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
239 : : }
240 : : }
241 : :
242 [ + + ]: 1596 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
243 [ + + ]: 32 : if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
244 [ + - ]: 28 : TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
245 [ + - ]: 28 : if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
246 [ + + ]: 28 : TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
247 : 28 : ch->outstanding_io_count--;
248 : 28 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
249 : 28 : break;
250 : : }
251 : : }
252 : : }
253 : : }
254 : :
255 [ + + ]: 1596 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
256 [ + + ]: 16 : if (bdev_io->u.bdev.zcopy.start) {
257 : 8 : g_zcopy_bdev_io = bdev_io;
258 [ + + ]: 8 : if (bdev_io->u.bdev.zcopy.populate) {
259 : : /* Start of a read */
260 : 4 : CU_ASSERT(g_zcopy_read_buf != NULL);
261 : 4 : CU_ASSERT(g_zcopy_read_buf_len > 0);
262 : 4 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
263 : 4 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
264 : 4 : bdev_io->u.bdev.iovcnt = 1;
265 : : } else {
266 : : /* Start of a write */
267 : 4 : CU_ASSERT(g_zcopy_write_buf != NULL);
268 : 4 : CU_ASSERT(g_zcopy_write_buf_len > 0);
269 : 4 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
270 : 4 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
271 : 4 : bdev_io->u.bdev.iovcnt = 1;
272 : : }
273 : : } else {
274 [ + + ]: 8 : if (bdev_io->u.bdev.zcopy.commit) {
275 : : /* End of write */
276 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
277 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
278 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
279 : 4 : g_zcopy_write_buf = NULL;
280 : 4 : g_zcopy_write_buf_len = 0;
281 : : } else {
282 : : /* End of read */
283 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
284 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
285 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
286 : 4 : g_zcopy_read_buf = NULL;
287 : 4 : g_zcopy_read_buf_len = 0;
288 : : }
289 : : }
290 : : }
291 : :
292 [ + + ]: 1596 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
293 : 4 : bdev_io->u.bdev.seek.offset = g_seek_data_offset;
294 : : }
295 : :
296 [ + + ]: 1596 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
297 : 4 : bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
298 : : }
299 : :
300 : 1596 : TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
301 : 1596 : ch->outstanding_io_count++;
302 : :
303 : 1596 : expected_io = TAILQ_FIRST(&ch->expected_io);
304 [ + + ]: 1596 : if (expected_io == NULL) {
305 : 484 : return;
306 : : }
307 [ + + ]: 1112 : TAILQ_REMOVE(&ch->expected_io, expected_io, link);
308 : :
309 [ + - ]: 1112 : if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
310 : 1112 : CU_ASSERT(bdev_io->type == expected_io->type);
311 : : }
312 : :
313 [ + + ]: 1112 : if (expected_io->md_buf != NULL) {
314 : 112 : CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
315 : : }
316 : :
317 [ - + ]: 1112 : if (expected_io->length == 0) {
318 : 0 : free(expected_io);
319 : 0 : return;
320 : : }
321 : :
322 : 1112 : CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
323 : 1112 : CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
324 [ + + ]: 1112 : if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
325 : 84 : CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
326 : : }
327 : :
328 [ + + ]: 1112 : if (expected_io->iovcnt == 0) {
329 : 404 : free(expected_io);
330 : : /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
331 : 404 : return;
332 : : }
333 : :
334 : 708 : CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
335 [ + + ]: 2892 : for (i = 0; i < expected_io->iovcnt; i++) {
336 : 2184 : expected_iov = &expected_io->iov[i];
337 [ + + ]: 2184 : if (bdev_io->internal.orig_iovcnt == 0) {
338 : 2168 : iov = &bdev_io->u.bdev.iovs[i];
339 : : } else {
340 : 16 : iov = bdev_io->internal.orig_iovs;
341 : : }
342 : 2184 : CU_ASSERT(iov->iov_len == expected_iov->iov_len);
343 : 2184 : CU_ASSERT(iov->iov_base == expected_iov->iov_base);
344 : : }
345 : :
346 : 708 : free(expected_io);
347 : : }
348 : :
/* get_buf completion: the buffer must have been obtained, then submit as usual. */
static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

/* Variant of the submit handler that first allocates a data buffer via
 * spdk_bdev_io_get_buf(), exercising the iobuf path. */
static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
364 : :
365 : : static uint32_t
366 : 688 : stub_complete_io(uint32_t num_to_complete)
367 : : {
368 : 688 : struct bdev_ut_channel *ch = g_bdev_ut_channel;
369 : : struct spdk_bdev_io *bdev_io;
370 : : static enum spdk_bdev_io_status io_status;
371 : 688 : uint32_t num_completed = 0;
372 : :
373 [ + + ]: 2252 : while (num_completed < num_to_complete) {
374 [ + + ]: 1576 : if (TAILQ_EMPTY(&ch->outstanding_io)) {
375 : 12 : break;
376 : : }
377 : 1564 : bdev_io = TAILQ_FIRST(&ch->outstanding_io);
378 [ + + ]: 1564 : TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
379 : 1564 : ch->outstanding_io_count--;
380 : 1564 : io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
381 : : g_io_exp_status;
382 : 1564 : spdk_bdev_io_complete(bdev_io, io_status);
383 : 1564 : num_completed++;
384 : : }
385 : :
386 : 688 : return num_completed;
387 : : }
388 : :
/* ->get_io_channel for the stub bdevs: one shared test io_device. */
static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

/* Per-io-type support matrix; tests toggle entries via ut_enable_io_type(). */
static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA]		= true,
	[SPDK_BDEV_IO_TYPE_COPY]		= true,
};

/* Let a test pretend the backing device lacks (or regains) an I/O type. */
static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

/* ->io_type_supported callback: consult the matrix above. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}
424 : :
/* Function table shared by every stub bdev the tests create. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

/* Channel create: publish the (single) channel in g_bdev_ut_channel so tests
 * can reach its queues directly. */
static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

/* Channel destroy: clear the published pointer. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
452 : :
struct spdk_bdev_module bdev_ut_if;

/* Async module init: register the test io_device, then signal completion. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

/* Base-bdev module under test; async_init exercises the deferred-init path. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

/* Virtual-bdev module; its examine callbacks drive the examine tests. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
501 : :
/* Optional per-bdev hooks/counters, attached via the bdev's ctxt pointer, for
 * tests that verify examine behavior. */
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

/* examine_config: invoke the test hook (if any), then report done so the
 * examine state machine can advance. */
static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

/* examine_disk: same pattern as examine_config. */
static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
538 : :
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

/* Per-test bdev layer bring-up: optionally apply opts, init iobuf and the
 * bdev subsystem, and poll until the async init completes. */
static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

/* Per-test tear-down mirror of ut_init_bdev(). */
static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}
572 : :
573 : : static struct spdk_bdev *
574 : 288 : allocate_bdev_ctx(char *name, void *ctx)
575 : : {
576 : : struct spdk_bdev *bdev;
577 : : int rc;
578 : :
579 : 288 : bdev = calloc(1, sizeof(*bdev));
580 [ - + ]: 288 : SPDK_CU_ASSERT_FATAL(bdev != NULL);
581 : :
582 : 288 : bdev->ctxt = ctx;
583 : 288 : bdev->name = name;
584 : 288 : bdev->fn_table = &fn_table;
585 : 288 : bdev->module = &bdev_ut_if;
586 : 288 : bdev->blockcnt = 1024;
587 : 288 : bdev->blocklen = 512;
588 : :
589 : 288 : spdk_uuid_generate(&bdev->uuid);
590 : :
591 : 288 : rc = spdk_bdev_register(bdev);
592 : 288 : poll_threads();
593 : 288 : CU_ASSERT(rc == 0);
594 : :
595 : 288 : return bdev;
596 : : }
597 : :
/* Convenience wrapper: stub base bdev with no test context. */
static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

/* Like allocate_bdev(), but owned by the vbdev_ut module so it plays the role
 * of a virtual bdev in the claim/open tests.  No UUID is generated here. */
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}
625 : :
/* Unregister and free a stub bdev; poison the memory to catch use-after-free. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

/* Same as free_bdev(); kept separate for symmetry with allocate_vbdev(). */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
643 : :
/* Stat completion: verify the bdev identity, free the stat the test
 * allocated, and flag completion through cb_arg. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

/* Unregister completion: capture arg/rc for the test to inspect. */
static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

/* Event callback that ignores all events (used where events don't matter). */
static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

/* Event callback 1: record the event and close the descriptor on REMOVE.
 * event_ctx points at the descriptor pointer variable. */
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

/* Event callback 2: same behavior as cb1, records into g_event_type2. */
static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

/* Event callbacks 3/4: record only; descriptor is closed elsewhere. */
static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

/* Seek completion: capture the resulting offset and release the I/O. */
static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}
711 : :
/* Verify spdk_bdev_get_device_stat() invokes its callback with the right
 * bdev and rc == 0 (the callback frees the stat and sets 'done'). */
static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}
732 : :
/* Verify open-for-write rules across a tree of claimed base bdevs and
 * unclaimed leaf vbdevs: a module-claimed bdev may only be opened read-only. */
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	ut_init_bdev(NULL);

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+   +---+   +   +---+---+
	 *        |       |    \ |   /         \
	 *      bdev0   bdev1   bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.  (Comment previously said bdev3, but it is the
	 * claim on bdev4 itself that blocks the write open.)
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.  (Comment previously said bdev4.)
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed.  (Comment previously said bdev4.) */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);

	ut_fini_bdev();
}
870 : :
/* Verify module claims: a claim without a desc leaves an open read-only desc
 * read-only, while a claim that passes the desc upgrades it to read-write. */
static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	ut_init_bdev(NULL);

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
925 : :
926 : : static void
927 : 4 : bytes_to_blocks_test(void)
928 : : {
929 : 4 : struct spdk_bdev bdev;
930 : 4 : uint64_t offset_blocks, num_blocks;
931 : :
932 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
933 : :
934 : 4 : bdev.blocklen = 512;
935 : :
936 : : /* All parameters valid */
937 : 4 : offset_blocks = 0;
938 : 4 : num_blocks = 0;
939 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
940 : 4 : CU_ASSERT(offset_blocks == 1);
941 : 4 : CU_ASSERT(num_blocks == 2);
942 : :
943 : : /* Offset not a block multiple */
944 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
945 : :
946 : : /* Length not a block multiple */
947 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
948 : :
949 : : /* In case blocklen not the power of two */
950 : 4 : bdev.blocklen = 100;
951 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
952 : 4 : CU_ASSERT(offset_blocks == 1);
953 : 4 : CU_ASSERT(num_blocks == 2);
954 : :
955 : : /* Offset not a block multiple */
956 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);
957 : :
958 : : /* Length not a block multiple */
959 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
960 : 4 : }
961 : :
962 : : static void
963 : 4 : num_blocks_test(void)
964 : : {
965 : : struct spdk_bdev *bdev;
966 : 4 : struct spdk_bdev_desc *desc = NULL;
967 : : int rc;
968 : :
969 : 4 : ut_init_bdev(NULL);
970 : 4 : bdev = allocate_bdev("num_blocks");
971 : :
972 : 4 : spdk_bdev_notify_blockcnt_change(bdev, 50);
973 : :
974 : : /* Growing block number */
975 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
976 : : /* Shrinking block number */
977 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);
978 : :
979 : 4 : rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
980 : 4 : CU_ASSERT(rc == 0);
981 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
982 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
983 : :
984 : : /* Growing block number */
985 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
986 : : /* Shrinking block number */
987 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);
988 : :
989 : 4 : g_event_type1 = 0xFF;
990 : : /* Growing block number */
991 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);
992 : :
993 : 4 : poll_threads();
994 : 4 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);
995 : :
996 : 4 : g_event_type1 = 0xFF;
997 : : /* Growing block number and closing */
998 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);
999 : :
1000 : 4 : spdk_bdev_close(desc);
1001 : 4 : free_bdev(bdev);
1002 : 4 : ut_fini_bdev();
1003 : :
1004 : 4 : poll_threads();
1005 : :
1006 : : /* Callback is not called for closed device */
1007 : 4 : CU_ASSERT_EQUAL(g_event_type1, 0xFF);
1008 : 4 : }
1009 : :
1010 : : static void
1011 : 4 : io_valid_test(void)
1012 : : {
1013 : 4 : struct spdk_bdev bdev;
1014 : :
1015 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
1016 : :
1017 : 4 : bdev.blocklen = 512;
1018 : 4 : spdk_spin_init(&bdev.internal.spinlock);
1019 : :
1020 : 4 : spdk_bdev_notify_blockcnt_change(&bdev, 100);
1021 : :
1022 : : /* All parameters valid */
1023 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
1024 : :
1025 : : /* Last valid block */
1026 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
1027 : :
1028 : : /* Offset past end of bdev */
1029 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
1030 : :
1031 : : /* Offset + length past end of bdev */
1032 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
1033 : :
1034 : : /* Offset near end of uint64_t range (2^64 - 1) */
1035 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
1036 : :
1037 : 4 : spdk_spin_destroy(&bdev.internal.spinlock);
1038 : 4 : }
1039 : :
1040 : : static void
1041 : 4 : alias_add_del_test(void)
1042 : : {
1043 : : struct spdk_bdev *bdev[3];
1044 : : int rc;
1045 : :
1046 : 4 : ut_init_bdev(NULL);
1047 : :
1048 : : /* Creating and registering bdevs */
1049 : 4 : bdev[0] = allocate_bdev("bdev0");
1050 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
1051 : :
1052 : 4 : bdev[1] = allocate_bdev("bdev1");
1053 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
1054 : :
1055 : 4 : bdev[2] = allocate_bdev("bdev2");
1056 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
1057 : :
1058 : 4 : poll_threads();
1059 : :
1060 : : /*
1061 : : * Trying adding an alias identical to name.
1062 : : * Alias is identical to name, so it can not be added to aliases list
1063 : : */
1064 : 4 : rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
1065 : 4 : CU_ASSERT(rc == -EEXIST);
1066 : :
1067 : : /*
1068 : : * Trying to add empty alias,
1069 : : * this one should fail
1070 : : */
1071 : 4 : rc = spdk_bdev_alias_add(bdev[0], NULL);
1072 : 4 : CU_ASSERT(rc == -EINVAL);
1073 : :
1074 : : /* Trying adding same alias to two different registered bdevs */
1075 : :
1076 : : /* Alias is used first time, so this one should pass */
1077 : 4 : rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
1078 : 4 : CU_ASSERT(rc == 0);
1079 : :
1080 : : /* Alias was added to another bdev, so this one should fail */
1081 : 4 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
1082 : 4 : CU_ASSERT(rc == -EEXIST);
1083 : :
1084 : : /* Alias is used first time, so this one should pass */
1085 : 4 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
1086 : 4 : CU_ASSERT(rc == 0);
1087 : :
1088 : : /* Trying removing an alias from registered bdevs */
1089 : :
1090 : : /* Alias is not on a bdev aliases list, so this one should fail */
1091 : 4 : rc = spdk_bdev_alias_del(bdev[0], "not existing");
1092 : 4 : CU_ASSERT(rc == -ENOENT);
1093 : :
1094 : : /* Alias is present on a bdev aliases list, so this one should pass */
1095 : 4 : rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
1096 : 4 : CU_ASSERT(rc == 0);
1097 : :
1098 : : /* Alias is present on a bdev aliases list, so this one should pass */
1099 : 4 : rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
1100 : 4 : CU_ASSERT(rc == 0);
1101 : :
1102 : : /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
1103 : 4 : rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
1104 : 4 : CU_ASSERT(rc != 0);
1105 : :
1106 : : /* Trying to del all alias from empty alias list */
1107 : 4 : spdk_bdev_alias_del_all(bdev[2]);
1108 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
1109 : :
1110 : : /* Trying to del all alias from non-empty alias list */
1111 : 4 : rc = spdk_bdev_alias_add(bdev[2], "alias0");
1112 : 4 : CU_ASSERT(rc == 0);
1113 : 4 : rc = spdk_bdev_alias_add(bdev[2], "alias1");
1114 : 4 : CU_ASSERT(rc == 0);
1115 : 4 : spdk_bdev_alias_del_all(bdev[2]);
1116 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
1117 : :
1118 : : /* Unregister and free bdevs */
1119 : 4 : spdk_bdev_unregister(bdev[0], NULL, NULL);
1120 : 4 : spdk_bdev_unregister(bdev[1], NULL, NULL);
1121 : 4 : spdk_bdev_unregister(bdev[2], NULL, NULL);
1122 : :
1123 : 4 : poll_threads();
1124 : :
1125 : 4 : free(bdev[0]);
1126 : 4 : free(bdev[1]);
1127 : 4 : free(bdev[2]);
1128 : :
1129 : 4 : ut_fini_bdev();
1130 : 4 : }
1131 : :
1132 : : static void
1133 : 592 : io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1134 : : {
1135 : 592 : g_io_done = true;
1136 : 592 : g_io_status = bdev_io->internal.status;
1137 [ + + + + ]: 592 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
1138 : : (bdev_io->u.bdev.zcopy.start)) {
1139 : 8 : g_zcopy_bdev_io = bdev_io;
1140 : : } else {
1141 : 584 : spdk_bdev_free_io(bdev_io);
1142 : 584 : g_zcopy_bdev_io = NULL;
1143 : : }
1144 : 592 : }
1145 : :
1146 : : struct bdev_ut_io_wait_entry {
1147 : : struct spdk_bdev_io_wait_entry entry;
1148 : : struct spdk_io_channel *io_ch;
1149 : : struct spdk_bdev_desc *desc;
1150 : : bool submitted;
1151 : : };
1152 : :
1153 : : static void
1154 : 8 : io_wait_cb(void *arg)
1155 : : {
1156 : 8 : struct bdev_ut_io_wait_entry *entry = arg;
1157 : : int rc;
1158 : :
1159 : 8 : rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
1160 : 8 : CU_ASSERT(rc == 0);
1161 : 8 : entry->submitted = true;
1162 : 8 : }
1163 : :
1164 : : static void
1165 : 4 : bdev_io_types_test(void)
1166 : : {
1167 : : struct spdk_bdev *bdev;
1168 : 4 : struct spdk_bdev_desc *desc = NULL;
1169 : : struct spdk_io_channel *io_ch;
1170 : 4 : struct spdk_bdev_opts bdev_opts = {};
1171 : : int rc;
1172 : :
1173 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1174 : 4 : bdev_opts.bdev_io_pool_size = 4;
1175 : 4 : bdev_opts.bdev_io_cache_size = 2;
1176 : 4 : ut_init_bdev(&bdev_opts);
1177 : :
1178 : 4 : bdev = allocate_bdev("bdev0");
1179 : :
1180 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1181 : 4 : CU_ASSERT(rc == 0);
1182 : 4 : poll_threads();
1183 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1184 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1185 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1186 : 4 : CU_ASSERT(io_ch != NULL);
1187 : :
1188 : : /* WRITE and WRITE ZEROES are not supported */
1189 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
1190 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
1191 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
1192 : 4 : CU_ASSERT(rc == -ENOTSUP);
1193 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
1194 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
1195 : :
1196 : : /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
1197 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
1198 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
1199 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
1200 : 4 : rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1201 : 4 : CU_ASSERT(rc == -ENOTSUP);
1202 : 4 : rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
1203 : 4 : CU_ASSERT(rc == -ENOTSUP);
1204 : 4 : rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1205 : 4 : CU_ASSERT(rc == -ENOTSUP);
1206 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
1207 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
1208 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);
1209 : :
1210 : 4 : spdk_put_io_channel(io_ch);
1211 : 4 : spdk_bdev_close(desc);
1212 : 4 : free_bdev(bdev);
1213 : 4 : ut_fini_bdev();
1214 : 4 : }
1215 : :
1216 : : static void
1217 : 4 : bdev_io_wait_test(void)
1218 : : {
1219 : : struct spdk_bdev *bdev;
1220 : 4 : struct spdk_bdev_desc *desc = NULL;
1221 : : struct spdk_io_channel *io_ch;
1222 : 4 : struct spdk_bdev_opts bdev_opts = {};
1223 : 4 : struct bdev_ut_io_wait_entry io_wait_entry;
1224 : 4 : struct bdev_ut_io_wait_entry io_wait_entry2;
1225 : : int rc;
1226 : :
1227 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1228 : 4 : bdev_opts.bdev_io_pool_size = 4;
1229 : 4 : bdev_opts.bdev_io_cache_size = 2;
1230 : 4 : ut_init_bdev(&bdev_opts);
1231 : :
1232 : 4 : bdev = allocate_bdev("bdev0");
1233 : :
1234 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1235 : 4 : CU_ASSERT(rc == 0);
1236 : 4 : poll_threads();
1237 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1238 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1239 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1240 : 4 : CU_ASSERT(io_ch != NULL);
1241 : :
1242 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1243 : 4 : CU_ASSERT(rc == 0);
1244 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1245 : 4 : CU_ASSERT(rc == 0);
1246 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1247 : 4 : CU_ASSERT(rc == 0);
1248 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1249 : 4 : CU_ASSERT(rc == 0);
1250 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1251 : :
1252 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1253 : 4 : CU_ASSERT(rc == -ENOMEM);
1254 : :
1255 : 4 : io_wait_entry.entry.bdev = bdev;
1256 : 4 : io_wait_entry.entry.cb_fn = io_wait_cb;
1257 : 4 : io_wait_entry.entry.cb_arg = &io_wait_entry;
1258 : 4 : io_wait_entry.io_ch = io_ch;
1259 : 4 : io_wait_entry.desc = desc;
1260 : 4 : io_wait_entry.submitted = false;
1261 : : /* Cannot use the same io_wait_entry for two different calls. */
1262 : 4 : memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
1263 : 4 : io_wait_entry2.entry.cb_arg = &io_wait_entry2;
1264 : :
1265 : : /* Queue two I/O waits. */
1266 : 4 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
1267 : 4 : CU_ASSERT(rc == 0);
1268 [ - + ]: 4 : CU_ASSERT(io_wait_entry.submitted == false);
1269 : 4 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
1270 : 4 : CU_ASSERT(rc == 0);
1271 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == false);
1272 : :
1273 : 4 : stub_complete_io(1);
1274 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1275 [ - + ]: 4 : CU_ASSERT(io_wait_entry.submitted == true);
1276 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == false);
1277 : :
1278 : 4 : stub_complete_io(1);
1279 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1280 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == true);
1281 : :
1282 : 4 : stub_complete_io(4);
1283 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1284 : :
1285 : 4 : spdk_put_io_channel(io_ch);
1286 : 4 : spdk_bdev_close(desc);
1287 : 4 : free_bdev(bdev);
1288 : 4 : ut_fini_bdev();
1289 : 4 : }
1290 : :
1291 : : static void
1292 : 4 : bdev_io_spans_split_test(void)
1293 : : {
1294 : 4 : struct spdk_bdev bdev;
1295 : 4 : struct spdk_bdev_io bdev_io;
1296 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
1297 : :
1298 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
1299 : 4 : bdev_io.u.bdev.iovs = iov;
1300 : :
1301 : 4 : bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
1302 : 4 : bdev.optimal_io_boundary = 0;
1303 : 4 : bdev.max_segment_size = 0;
1304 : 4 : bdev.max_num_segments = 0;
1305 : 4 : bdev_io.bdev = &bdev;
1306 : :
1307 : : /* bdev has no optimal_io_boundary and max_size set - so this should return false. */
1308 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1309 : :
1310 : 4 : bdev.split_on_optimal_io_boundary = true;
1311 : 4 : bdev.optimal_io_boundary = 32;
1312 : 4 : bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
1313 : :
1314 : : /* RESETs are not based on LBAs - so this should return false. */
1315 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1316 : :
1317 : 4 : bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
1318 : 4 : bdev_io.u.bdev.offset_blocks = 0;
1319 : 4 : bdev_io.u.bdev.num_blocks = 32;
1320 : :
1321 : : /* This I/O run right up to, but does not cross, the boundary - so this should return false. */
1322 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1323 : :
1324 : 4 : bdev_io.u.bdev.num_blocks = 33;
1325 : :
1326 : : /* This I/O spans a boundary. */
1327 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1328 : :
1329 : 4 : bdev_io.u.bdev.num_blocks = 32;
1330 : 4 : bdev.max_segment_size = 512 * 32;
1331 : 4 : bdev.max_num_segments = 1;
1332 : 4 : bdev_io.u.bdev.iovcnt = 1;
1333 : 4 : iov[0].iov_len = 512;
1334 : :
1335 : : /* Does not cross and exceed max_size or max_segs */
1336 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1337 : :
1338 : 4 : bdev.split_on_optimal_io_boundary = false;
1339 : 4 : bdev.max_segment_size = 512;
1340 : 4 : bdev.max_num_segments = 1;
1341 : 4 : bdev_io.u.bdev.iovcnt = 2;
1342 : :
1343 : : /* Exceed max_segs */
1344 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1345 : :
1346 : 4 : bdev.max_num_segments = 2;
1347 : 4 : iov[0].iov_len = 513;
1348 : 4 : iov[1].iov_len = 512;
1349 : :
1350 : : /* Exceed max_sizes */
1351 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1352 : :
1353 : 4 : bdev.max_segment_size = 0;
1354 : 4 : bdev.write_unit_size = 32;
1355 : 4 : bdev.split_on_write_unit = true;
1356 : 4 : bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;
1357 : :
1358 : : /* This I/O is one write unit */
1359 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1360 : :
1361 : 4 : bdev_io.u.bdev.num_blocks = 32 * 2;
1362 : :
1363 : : /* This I/O is more than one write unit */
1364 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1365 : :
1366 : 4 : bdev_io.u.bdev.offset_blocks = 1;
1367 : 4 : bdev_io.u.bdev.num_blocks = 32;
1368 : :
1369 : : /* This I/O is not aligned to write unit size */
1370 : 4 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1371 : 4 : }
1372 : :
1373 : : static void
1374 : 4 : bdev_io_boundary_split_test(void)
1375 : : {
1376 : : struct spdk_bdev *bdev;
1377 : 4 : struct spdk_bdev_desc *desc = NULL;
1378 : : struct spdk_io_channel *io_ch;
1379 : 4 : struct spdk_bdev_opts bdev_opts = {};
1380 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
1381 : : struct ut_expected_io *expected_io;
1382 : 4 : void *md_buf = (void *)0xFF000000;
1383 : : uint64_t i;
1384 : : int rc;
1385 : :
1386 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1387 : 4 : bdev_opts.bdev_io_pool_size = 512;
1388 : 4 : bdev_opts.bdev_io_cache_size = 64;
1389 : 4 : ut_init_bdev(&bdev_opts);
1390 : :
1391 : 4 : bdev = allocate_bdev("bdev0");
1392 : :
1393 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1394 : 4 : CU_ASSERT(rc == 0);
1395 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1396 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1397 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1398 : 4 : CU_ASSERT(io_ch != NULL);
1399 : :
1400 : 4 : bdev->optimal_io_boundary = 16;
1401 : 4 : bdev->split_on_optimal_io_boundary = false;
1402 : :
1403 : 4 : g_io_done = false;
1404 : :
1405 : : /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
1406 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
1407 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
1408 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1409 : :
1410 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1411 : 4 : CU_ASSERT(rc == 0);
1412 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1413 : :
1414 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1415 : 4 : stub_complete_io(1);
1416 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1417 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1418 : :
1419 : 4 : bdev->split_on_optimal_io_boundary = true;
1420 : 4 : bdev->md_interleave = false;
1421 : 4 : bdev->md_len = 8;
1422 : :
1423 : : /* Now test that a single-vector command is split correctly.
1424 : : * Offset 14, length 8, payload 0xF000
1425 : : * Child - Offset 14, length 2, payload 0xF000
1426 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1427 : : *
1428 : : * Set up the expected values before calling spdk_bdev_read_blocks
1429 : : */
1430 : 4 : g_io_done = false;
1431 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1432 : 4 : expected_io->md_buf = md_buf;
1433 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1434 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1435 : :
1436 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1437 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1438 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1439 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1440 : :
1441 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
1442 : 4 : rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
1443 : : 14, 8, io_done, NULL);
1444 : 4 : CU_ASSERT(rc == 0);
1445 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1446 : :
1447 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1448 : 4 : stub_complete_io(2);
1449 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1450 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1451 : :
1452 : : /* Now set up a more complex, multi-vector command that needs to be split,
1453 : : * including splitting iovecs.
1454 : : */
1455 : 4 : iov[0].iov_base = (void *)0x10000;
1456 : 4 : iov[0].iov_len = 512;
1457 : 4 : iov[1].iov_base = (void *)0x20000;
1458 : 4 : iov[1].iov_len = 20 * 512;
1459 : 4 : iov[2].iov_base = (void *)0x30000;
1460 : 4 : iov[2].iov_len = 11 * 512;
1461 : :
1462 : 4 : g_io_done = false;
1463 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1464 : 4 : expected_io->md_buf = md_buf;
1465 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1466 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1467 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1468 : :
1469 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1470 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1471 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1472 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1473 : :
1474 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1475 : 4 : expected_io->md_buf = md_buf + 18 * 8;
1476 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1477 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1478 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1479 : :
1480 : 4 : rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
1481 : : 14, 32, io_done, NULL);
1482 : 4 : CU_ASSERT(rc == 0);
1483 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1484 : :
1485 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1486 : 4 : stub_complete_io(3);
1487 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1488 : :
1489 : : /* Test multi vector command that needs to be split by strip and then needs to be
1490 : : * split further due to the capacity of child iovs.
1491 : : */
1492 [ + + ]: 260 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1493 : 256 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1494 : 256 : iov[i].iov_len = 512;
1495 : : }
1496 : :
1497 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1498 : 4 : g_io_done = false;
1499 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1500 : : SPDK_BDEV_IO_NUM_CHILD_IOV);
1501 : 4 : expected_io->md_buf = md_buf;
1502 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1503 : 128 : ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1504 : : }
1505 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1506 : :
1507 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1508 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
1509 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1510 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1511 : 128 : ut_expected_io_set_iov(expected_io, i,
1512 : 128 : (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1513 : : }
1514 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1515 : :
1516 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1517 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1518 : 4 : CU_ASSERT(rc == 0);
1519 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1520 : :
1521 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1522 : 4 : stub_complete_io(1);
1523 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1524 : :
1525 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1526 : 4 : stub_complete_io(1);
1527 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1528 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1529 : :
1530 : : /* Test multi vector command that needs to be split by strip and then needs to be
1531 : : * split further due to the capacity of child iovs. In this case, the length of
1532 : : * the rest of iovec array with an I/O boundary is the multiple of block size.
1533 : : */
1534 : :
1535 : : /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1536 : : * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1537 : : */
1538 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1539 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1540 : 120 : iov[i].iov_len = 512;
1541 : : }
1542 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1543 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1544 : 8 : iov[i].iov_len = 256;
1545 : : }
1546 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1547 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1548 : :
1549 : : /* Add an extra iovec to trigger split */
1550 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1551 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1552 : :
1553 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1554 : 4 : g_io_done = false;
1555 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1556 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
1557 : 4 : expected_io->md_buf = md_buf;
1558 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1559 : 120 : ut_expected_io_set_iov(expected_io, i,
1560 : 120 : (void *)((i + 1) * 0x10000), 512);
1561 : : }
1562 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1563 : 8 : ut_expected_io_set_iov(expected_io, i,
1564 : 8 : (void *)((i + 1) * 0x10000), 256);
1565 : : }
1566 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1567 : :
1568 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1569 : : 1, 1);
1570 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1571 : 4 : ut_expected_io_set_iov(expected_io, 0,
1572 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1573 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1574 : :
1575 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1576 : : 1, 1);
1577 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1578 : 4 : ut_expected_io_set_iov(expected_io, 0,
1579 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1580 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1581 : :
1582 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
1583 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1584 : 4 : CU_ASSERT(rc == 0);
1585 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1586 : :
1587 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1588 : 4 : stub_complete_io(1);
1589 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1590 : :
1591 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1592 : 4 : stub_complete_io(2);
1593 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1594 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1595 : :
1596 : : /* Test multi vector command that needs to be split by strip and then needs to be
1597 : : * split further due to the capacity of child iovs, the child request offset should
1598 : : * be rewind to last aligned offset and go success without error.
1599 : : */
1600 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1601 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1602 : 124 : iov[i].iov_len = 512;
1603 : : }
1604 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1605 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1606 : :
1607 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1608 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1609 : :
1610 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1611 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1612 : :
1613 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1614 : 4 : g_io_done = false;
1615 : 4 : g_io_status = 0;
1616 : : /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
1617 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1618 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1619 : 4 : expected_io->md_buf = md_buf;
1620 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1621 : 124 : ut_expected_io_set_iov(expected_io, i,
1622 : 124 : (void *)((i + 1) * 0x10000), 512);
1623 : : }
1624 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1625 : : /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
1626 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1627 : : 1, 2);
1628 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1629 : 4 : ut_expected_io_set_iov(expected_io, 0,
1630 : : (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
1631 : 4 : ut_expected_io_set_iov(expected_io, 1,
1632 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
1633 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1634 : : /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
1635 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1636 : : 1, 1);
1637 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1638 : 4 : ut_expected_io_set_iov(expected_io, 0,
1639 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1640 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1641 : :
1642 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1643 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1644 : 4 : CU_ASSERT(rc == 0);
1645 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1646 : :
1647 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1648 : 4 : stub_complete_io(1);
1649 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1650 : :
1651 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1652 : 4 : stub_complete_io(2);
1653 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1654 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1655 : :
1656 : : /* Test multi vector command that needs to be split due to the IO boundary and
1657 : : * the capacity of child iovs. Especially test the case when the command is
1658 : : * split due to the capacity of child iovs, the tail address is not aligned with
1659 : : * block size and is rewinded to the aligned address.
1660 : : *
1661 : : * The iovecs used in read request is complex but is based on the data
1662 : : * collected in the real issue. We change the base addresses but keep the lengths
1663 : : * not to loose the credibility of the test.
1664 : : */
1665 : 4 : bdev->optimal_io_boundary = 128;
1666 : 4 : g_io_done = false;
1667 : 4 : g_io_status = 0;
1668 : :
1669 [ + + ]: 128 : for (i = 0; i < 31; i++) {
1670 : 124 : iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
1671 : 124 : iov[i].iov_len = 1024;
1672 : : }
1673 : 4 : iov[31].iov_base = (void *)0xFEED1F00000;
1674 : 4 : iov[31].iov_len = 32768;
1675 : 4 : iov[32].iov_base = (void *)0xFEED2000000;
1676 : 4 : iov[32].iov_len = 160;
1677 : 4 : iov[33].iov_base = (void *)0xFEED2100000;
1678 : 4 : iov[33].iov_len = 4096;
1679 : 4 : iov[34].iov_base = (void *)0xFEED2200000;
1680 : 4 : iov[34].iov_len = 4096;
1681 : 4 : iov[35].iov_base = (void *)0xFEED2300000;
1682 : 4 : iov[35].iov_len = 4096;
1683 : 4 : iov[36].iov_base = (void *)0xFEED2400000;
1684 : 4 : iov[36].iov_len = 4096;
1685 : 4 : iov[37].iov_base = (void *)0xFEED2500000;
1686 : 4 : iov[37].iov_len = 4096;
1687 : 4 : iov[38].iov_base = (void *)0xFEED2600000;
1688 : 4 : iov[38].iov_len = 4096;
1689 : 4 : iov[39].iov_base = (void *)0xFEED2700000;
1690 : 4 : iov[39].iov_len = 4096;
1691 : 4 : iov[40].iov_base = (void *)0xFEED2800000;
1692 : 4 : iov[40].iov_len = 4096;
1693 : 4 : iov[41].iov_base = (void *)0xFEED2900000;
1694 : 4 : iov[41].iov_len = 4096;
1695 : 4 : iov[42].iov_base = (void *)0xFEED2A00000;
1696 : 4 : iov[42].iov_len = 4096;
1697 : 4 : iov[43].iov_base = (void *)0xFEED2B00000;
1698 : 4 : iov[43].iov_len = 12288;
1699 : 4 : iov[44].iov_base = (void *)0xFEED2C00000;
1700 : 4 : iov[44].iov_len = 8192;
1701 : 4 : iov[45].iov_base = (void *)0xFEED2F00000;
1702 : 4 : iov[45].iov_len = 4096;
1703 : 4 : iov[46].iov_base = (void *)0xFEED3000000;
1704 : 4 : iov[46].iov_len = 4096;
1705 : 4 : iov[47].iov_base = (void *)0xFEED3100000;
1706 : 4 : iov[47].iov_len = 4096;
1707 : 4 : iov[48].iov_base = (void *)0xFEED3200000;
1708 : 4 : iov[48].iov_len = 24576;
1709 : 4 : iov[49].iov_base = (void *)0xFEED3300000;
1710 : 4 : iov[49].iov_len = 16384;
1711 : 4 : iov[50].iov_base = (void *)0xFEED3400000;
1712 : 4 : iov[50].iov_len = 12288;
1713 : 4 : iov[51].iov_base = (void *)0xFEED3500000;
1714 : 4 : iov[51].iov_len = 4096;
1715 : 4 : iov[52].iov_base = (void *)0xFEED3600000;
1716 : 4 : iov[52].iov_len = 4096;
1717 : 4 : iov[53].iov_base = (void *)0xFEED3700000;
1718 : 4 : iov[53].iov_len = 4096;
1719 : 4 : iov[54].iov_base = (void *)0xFEED3800000;
1720 : 4 : iov[54].iov_len = 28672;
1721 : 4 : iov[55].iov_base = (void *)0xFEED3900000;
1722 : 4 : iov[55].iov_len = 20480;
1723 : 4 : iov[56].iov_base = (void *)0xFEED3A00000;
1724 : 4 : iov[56].iov_len = 4096;
1725 : 4 : iov[57].iov_base = (void *)0xFEED3B00000;
1726 : 4 : iov[57].iov_len = 12288;
1727 : 4 : iov[58].iov_base = (void *)0xFEED3C00000;
1728 : 4 : iov[58].iov_len = 4096;
1729 : 4 : iov[59].iov_base = (void *)0xFEED3D00000;
1730 : 4 : iov[59].iov_len = 4096;
1731 : 4 : iov[60].iov_base = (void *)0xFEED3E00000;
1732 : 4 : iov[60].iov_len = 352;
1733 : :
1734 : : /* The 1st child IO must be from iov[0] to iov[31] split by the capacity
1735 : : * of child iovs,
1736 : : */
1737 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
1738 : 4 : expected_io->md_buf = md_buf;
1739 [ + + ]: 132 : for (i = 0; i < 32; i++) {
1740 : 128 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1741 : : }
1742 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1743 : :
1744 : : /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
1745 : : * split by the IO boundary requirement.
1746 : : */
1747 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1748 : 4 : expected_io->md_buf = md_buf + 126 * 8;
1749 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1750 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1751 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1752 : :
1753 : : /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1754 : : * the first 864 bytes of iov[46] split by the IO boundary requirement.
1755 : : */
1756 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1757 : 4 : expected_io->md_buf = md_buf + 128 * 8;
1758 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1759 : 4 : iov[33].iov_len - 864);
1760 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1761 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1762 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1763 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1764 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1765 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1766 : 4 : ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1767 : 4 : ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1768 : 4 : ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1769 : 4 : ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1770 : 4 : ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1771 : 4 : ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1772 : 4 : ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1773 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1774 : :
1775 : : /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1776 : : * first 864 bytes of iov[52] split by the IO boundary requirement.
1777 : : */
1778 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1779 : 4 : expected_io->md_buf = md_buf + 256 * 8;
1780 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1781 : 4 : iov[46].iov_len - 864);
1782 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1783 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1784 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1785 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1786 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1787 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1788 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1789 : :
1790 : : /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1791 : : * the first 4096 bytes of iov[57] split by the IO boundary requirement.
1792 : : */
1793 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1794 : 4 : expected_io->md_buf = md_buf + 384 * 8;
1795 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1796 : 4 : iov[52].iov_len - 864);
1797 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1798 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1799 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1800 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1801 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1802 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1803 : :
1804 : : /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1805 : : * to the first 3936 bytes of iov[58] split by the capacity of child iovs.
1806 : : */
1807 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1808 : 4 : expected_io->md_buf = md_buf + 512 * 8;
1809 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1810 : 4 : iov[57].iov_len - 4960);
1811 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1812 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1813 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1814 : :
1815 : : /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1816 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1817 : 4 : expected_io->md_buf = md_buf + 542 * 8;
1818 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1819 : 4 : iov[59].iov_len - 3936);
1820 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1821 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1822 : :
1823 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1824 : : 0, 543, io_done, NULL);
1825 : 4 : CU_ASSERT(rc == 0);
1826 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1827 : :
1828 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1829 : 4 : stub_complete_io(1);
1830 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1831 : :
1832 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1833 : 4 : stub_complete_io(5);
1834 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1835 : :
1836 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1837 : 4 : stub_complete_io(1);
1838 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1839 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1840 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1841 : :
1842 : : /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1843 : : * split, so test that.
1844 : : */
1845 : 4 : bdev->optimal_io_boundary = 15;
1846 : 4 : g_io_done = false;
1847 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1848 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1849 : :
1850 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1851 : 4 : CU_ASSERT(rc == 0);
1852 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1853 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1854 : 4 : stub_complete_io(1);
1855 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1856 : :
1857 : : /* Test an UNMAP. This should also not be split. */
1858 : 4 : bdev->optimal_io_boundary = 16;
1859 : 4 : g_io_done = false;
1860 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1861 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1862 : :
1863 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1864 : 4 : CU_ASSERT(rc == 0);
1865 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1866 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1867 : 4 : stub_complete_io(1);
1868 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1869 : :
1870 : : /* Test a FLUSH. This should also not be split. */
1871 : 4 : bdev->optimal_io_boundary = 16;
1872 : 4 : g_io_done = false;
1873 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1874 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1875 : :
1876 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1877 : 4 : CU_ASSERT(rc == 0);
1878 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1879 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1880 : 4 : stub_complete_io(1);
1881 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1882 : :
1883 : : /* Test a COPY. This should also not be split. */
1884 : 4 : bdev->optimal_io_boundary = 15;
1885 : 4 : g_io_done = false;
1886 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1887 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1888 : :
1889 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1890 : 4 : CU_ASSERT(rc == 0);
1891 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1892 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1893 : 4 : stub_complete_io(1);
1894 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1895 : :
1896 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1897 : :
1898 : : /* Children requests return an error status */
1899 : 4 : bdev->optimal_io_boundary = 16;
1900 : 4 : iov[0].iov_base = (void *)0x10000;
1901 : 4 : iov[0].iov_len = 512 * 64;
1902 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1903 : 4 : g_io_done = false;
1904 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1905 : :
1906 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1907 : 4 : CU_ASSERT(rc == 0);
1908 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1909 : 4 : stub_complete_io(4);
1910 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1911 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1912 : 4 : stub_complete_io(1);
1913 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1914 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1915 : :
1916 : : /* Test if a multi vector command terminated with failure before continuing
1917 : : * splitting process when one of child I/O failed.
1918 : : * The multi vector command is as same as the above that needs to be split by strip
1919 : : * and then needs to be split further due to the capacity of child iovs.
1920 : : */
1921 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1922 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1923 : 124 : iov[i].iov_len = 512;
1924 : : }
1925 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1926 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1927 : :
1928 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1929 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1930 : :
1931 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1932 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1933 : :
1934 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1935 : :
1936 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1937 : 4 : g_io_done = false;
1938 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1939 : :
1940 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1941 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1942 : 4 : CU_ASSERT(rc == 0);
1943 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1944 : :
1945 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1946 : 4 : stub_complete_io(1);
1947 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1948 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1949 : :
1950 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1951 : :
1952 : : /* for this test we will create the following conditions to hit the code path where
1953 : : * we are trying to send and IO following a split that has no iovs because we had to
1954 : : * trim them for alignment reasons.
1955 : : *
1956 : : * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1957 : : * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1958 : : * position 30 and overshoot by 0x2e.
1959 : : * - That means we'll send the IO and loop back to pick up the remaining bytes at
1960 : : * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1961 : : * which eliniates that vector so we just send the first split IO with 30 vectors
1962 : : * and let the completion pick up the last 2 vectors.
1963 : : */
1964 : 4 : bdev->optimal_io_boundary = 32;
1965 : 4 : bdev->split_on_optimal_io_boundary = true;
1966 : 4 : g_io_done = false;
1967 : :
1968 : : /* Init all parent IOVs to 0x212 */
1969 [ + + ]: 140 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1970 : 136 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1971 : 136 : iov[i].iov_len = 0x212;
1972 : : }
1973 : :
1974 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1975 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1976 : : /* expect 0-29 to be 1:1 with the parent iov */
1977 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1978 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1979 : : }
1980 : :
1981 : : /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment
1982 : : * where 0x1e is the amount we overshot the 16K boundary
1983 : : */
1984 : 4 : ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
1985 : : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1986 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1987 : :
1988 : : /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
1989 : : * shortened that take it to the next boundary and then a final one to get us to
1990 : : * 0x4200 bytes for the IO.
1991 : : */
1992 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1993 : : SPDK_BDEV_IO_NUM_CHILD_IOV, 2);
1994 : : /* position 30 picked up the remaining bytes to the next boundary */
1995 : 4 : ut_expected_io_set_iov(expected_io, 0,
1996 : 4 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1997 : :
1998 : : /* position 31 picked the the rest of the transfer to get us to 0x4200 */
1999 : 4 : ut_expected_io_set_iov(expected_io, 1,
2000 : : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2001 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2002 : :
2003 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2004 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2005 : 4 : CU_ASSERT(rc == 0);
2006 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2007 : :
2008 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2009 : 4 : stub_complete_io(1);
2010 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2011 : :
2012 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2013 : 4 : stub_complete_io(1);
2014 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2015 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2016 : :
2017 : 4 : spdk_put_io_channel(io_ch);
2018 : 4 : spdk_bdev_close(desc);
2019 : 4 : free_bdev(bdev);
2020 : 4 : ut_fini_bdev();
2021 : 4 : }
2022 : :
2023 : : static void
2024 : 4 : bdev_io_max_size_and_segment_split_test(void)
2025 : : {
2026 : : struct spdk_bdev *bdev;
2027 : 4 : struct spdk_bdev_desc *desc = NULL;
2028 : : struct spdk_io_channel *io_ch;
2029 : 4 : struct spdk_bdev_opts bdev_opts = {};
2030 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2031 : : struct ut_expected_io *expected_io;
2032 : : uint64_t i;
2033 : : int rc;
2034 : :
2035 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2036 : 4 : bdev_opts.bdev_io_pool_size = 512;
2037 : 4 : bdev_opts.bdev_io_cache_size = 64;
2038 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
2039 : 4 : ut_init_bdev(&bdev_opts);
2040 : :
2041 : 4 : bdev = allocate_bdev("bdev0");
2042 : :
2043 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2044 : 4 : CU_ASSERT(rc == 0);
2045 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2046 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
2047 : 4 : CU_ASSERT(io_ch != NULL);
2048 : :
2049 : 4 : bdev->split_on_optimal_io_boundary = false;
2050 : 4 : bdev->optimal_io_boundary = 0;
2051 : :
2052 : : /* Case 0 max_num_segments == 0.
2053 : : * but segment size 2 * 512 > 512
2054 : : */
2055 : 4 : bdev->max_segment_size = 512;
2056 : 4 : bdev->max_num_segments = 0;
2057 : 4 : g_io_done = false;
2058 : :
2059 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2060 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2061 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2062 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2063 : :
2064 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2065 : 4 : CU_ASSERT(rc == 0);
2066 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2067 : :
2068 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2069 : 4 : stub_complete_io(1);
2070 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2071 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2072 : :
2073 : : /* Case 1 max_segment_size == 0
2074 : : * but iov num 2 > 1.
2075 : : */
2076 : 4 : bdev->max_segment_size = 0;
2077 : 4 : bdev->max_num_segments = 1;
2078 : 4 : g_io_done = false;
2079 : :
2080 : 4 : iov[0].iov_base = (void *)0x10000;
2081 : 4 : iov[0].iov_len = 512;
2082 : 4 : iov[1].iov_base = (void *)0x20000;
2083 : 4 : iov[1].iov_len = 8 * 512;
2084 : :
2085 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2086 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
2087 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2088 : :
2089 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
2090 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
2091 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2092 : :
2093 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
2094 : 4 : CU_ASSERT(rc == 0);
2095 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2096 : :
2097 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2098 : 4 : stub_complete_io(2);
2099 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2100 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2101 : :
2102 : : /* Test that a non-vector command is split correctly.
2103 : : * Set up the expected values before calling spdk_bdev_read_blocks
2104 : : */
2105 : 4 : bdev->max_segment_size = 512;
2106 : 4 : bdev->max_num_segments = 1;
2107 : 4 : g_io_done = false;
2108 : :
2109 : : /* Child IO 0 */
2110 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2111 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2112 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2113 : :
2114 : : /* Child IO 1 */
2115 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2116 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
2117 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2118 : :
2119 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
2120 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2121 : 4 : CU_ASSERT(rc == 0);
2122 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2123 : :
2124 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2125 : 4 : stub_complete_io(2);
2126 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2127 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2128 : :
2129 : : /* Now set up a more complex, multi-vector command that needs to be split,
2130 : : * including splitting iovecs.
2131 : : */
2132 : 4 : bdev->max_segment_size = 2 * 512;
2133 : 4 : bdev->max_num_segments = 1;
2134 : 4 : g_io_done = false;
2135 : :
2136 : 4 : iov[0].iov_base = (void *)0x10000;
2137 : 4 : iov[0].iov_len = 2 * 512;
2138 : 4 : iov[1].iov_base = (void *)0x20000;
2139 : 4 : iov[1].iov_len = 4 * 512;
2140 : 4 : iov[2].iov_base = (void *)0x30000;
2141 : 4 : iov[2].iov_len = 6 * 512;
2142 : :
2143 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2144 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2145 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2146 : :
2147 : : /* Split iov[1].size to 2 iov entries then split the segments */
2148 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2149 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2150 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2151 : :
2152 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2153 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2154 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2155 : :
2156 : : /* Split iov[2].size to 3 iov entries then split the segments */
2157 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2158 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2159 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2160 : :
2161 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2162 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2163 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2164 : :
2165 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2166 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2167 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2168 : :
2169 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2170 : 4 : CU_ASSERT(rc == 0);
2171 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2172 : :
2173 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2174 : 4 : stub_complete_io(6);
2175 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2176 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2177 : :
2178 : : /* Test multi vector command that needs to be split by strip and then needs to be
2179 : : * split further due to the capacity of parent IO child iovs.
2180 : : */
2181 : 4 : bdev->max_segment_size = 512;
2182 : 4 : bdev->max_num_segments = 1;
2183 : 4 : g_io_done = false;
2184 : :
2185 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2186 : 128 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2187 : 128 : iov[i].iov_len = 512 * 2;
2188 : : }
2189 : :
2190 : : /* Each input iov.size is split into 2 iovs,
2191 : : * half of the input iov can fill all child iov entries of a single IO.
2192 : : */
2193 [ + + ]: 68 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2194 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2195 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2196 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2197 : :
2198 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2199 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2200 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2201 : : }
2202 : :
2203 : : /* The remaining iov is split in the second round */
2204 [ + + ]: 68 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2205 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2206 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2207 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2208 : :
2209 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2210 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2211 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2212 : : }
2213 : :
2214 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2215 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2216 : 4 : CU_ASSERT(rc == 0);
2217 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2218 : :
2219 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2220 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2221 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2222 : :
2223 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2224 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2225 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2226 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2227 : :
2228 : : /* A wrong case, a child IO that is divided does
2229 : : * not meet the principle of multiples of block size,
2230 : : * and exits with error
2231 : : */
2232 : 4 : bdev->max_segment_size = 512;
2233 : 4 : bdev->max_num_segments = 1;
2234 : 4 : g_io_done = false;
2235 : :
2236 : 4 : iov[0].iov_base = (void *)0x10000;
2237 : 4 : iov[0].iov_len = 512 + 256;
2238 : 4 : iov[1].iov_base = (void *)0x20000;
2239 : 4 : iov[1].iov_len = 256;
2240 : :
2241 : : /* iov[0] is split to 512 and 256.
2242 : : * 256 is less than a block size, and it is found
2243 : : * in the next round of split that it is the first child IO smaller than
2244 : : * the block size, so the error exit
2245 : : */
2246 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2247 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2248 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2249 : :
2250 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2251 : 4 : CU_ASSERT(rc == 0);
2252 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2253 : :
2254 : : /* First child IO is OK */
2255 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2256 : 4 : stub_complete_io(1);
2257 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2258 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2259 : :
2260 : : /* error exit */
2261 : 4 : stub_complete_io(1);
2262 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2263 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2264 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2265 : :
2266 : : /* Test multi vector command that needs to be split by strip and then needs to be
2267 : : * split further due to the capacity of child iovs.
2268 : : *
2269 : : * In this case, the last two iovs need to be split, but it will exceed the capacity
2270 : : * of child iovs, so it needs to wait until the first batch completed.
2271 : : */
2272 : 4 : bdev->max_segment_size = 512;
2273 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2274 : 4 : g_io_done = false;
2275 : :
2276 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2277 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2278 : 120 : iov[i].iov_len = 512;
2279 : : }
2280 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2281 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2282 : 8 : iov[i].iov_len = 512 * 2;
2283 : : }
2284 : :
2285 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2286 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2287 : : /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
2288 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2289 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2290 : : }
2291 : : /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2292 : 4 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2293 : 4 : ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2294 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2295 : :
2296 : : /* Child iov entries exceed the max num of parent IO so split it in next round */
2297 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2298 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2299 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2300 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2301 : :
2302 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2303 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2304 : 4 : CU_ASSERT(rc == 0);
2305 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2306 : :
2307 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2308 : 4 : stub_complete_io(1);
2309 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2310 : :
2311 : : /* Next round */
2312 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2313 : 4 : stub_complete_io(1);
2314 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2315 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2316 : :
2317 : : /* This case is similar to the previous one, but the io composed of
2318 : : * the last few entries of child iov is not enough for a blocklen, so they
2319 : : * cannot be put into this IO, but wait until the next time.
2320 : : */
2321 : 4 : bdev->max_segment_size = 512;
2322 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2323 : 4 : g_io_done = false;
2324 : :
2325 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2326 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2327 : 120 : iov[i].iov_len = 512;
2328 : : }
2329 : :
2330 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2331 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2332 : 16 : iov[i].iov_len = 128;
2333 : : }
2334 : :
2335 : : /* First child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2336 : : * because the remaining 2 iovs are not enough for a blocklen.
2337 : : */
2338 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2339 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2340 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2341 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2342 : : }
2343 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2344 : :
2345 : : /* The second child io waits until the end of the first child io before executing.
2346 : : * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2347 : : * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2348 : : */
2349 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2350 : : 1, 4);
2351 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2352 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2353 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2354 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2355 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2356 : :
2357 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2358 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2359 : 4 : CU_ASSERT(rc == 0);
2360 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2361 : :
2362 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2363 : 4 : stub_complete_io(1);
2364 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2365 : :
2366 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2367 : 4 : stub_complete_io(1);
2368 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2369 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2370 : :
2371 : : /* A very complicated case. Each sg entry exceeds max_segment_size and
2372 : : * needs to be split. At the same time, child io must be a multiple of blocklen.
2373 : : * At the same time, child iovcnt exceeds parent iovcnt.
2374 : : */
2375 : 4 : bdev->max_segment_size = 512 + 128;
2376 : 4 : bdev->max_num_segments = 3;
2377 : 4 : g_io_done = false;
2378 : :
2379 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2380 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2381 : 120 : iov[i].iov_len = 512 + 256;
2382 : : }
2383 : :
2384 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2385 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2386 : 16 : iov[i].iov_len = 512 + 128;
2387 : : }
2388 : :
2389 : : /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries.
2390 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2391 : : * Generate 9 child IOs.
2392 : : */
2393 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2394 : 12 : uint32_t j = i * 4;
2395 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2396 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2397 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2398 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2399 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2400 : :
2401 : : /* Child io must be a multiple of blocklen
2402 : : * iov[j + 2] must be split. If the third entry is also added,
2403 : : * the multiple of blocklen cannot be guaranteed. But it still
2404 : : * occupies one iov entry of the parent child iov.
2405 : : */
2406 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2407 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2408 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2409 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2410 : :
2411 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2412 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2413 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2414 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2415 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2416 : : }
2417 : :
2418 : : /* Child iov position at 27, the 10th child IO
2419 : : * iov entry index is 3 * 4 and offset is 3 * 6
2420 : : */
2421 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2422 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2423 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2424 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2425 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2426 : :
2427 : : /* Child iov position at 30, the 11th child IO */
2428 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2429 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2430 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2431 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2432 : :
2433 : : /* The 2nd split round and iovpos is 0, the 12th child IO */
2434 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2435 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2436 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2437 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2438 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2439 : :
2440 : : /* Consume 9 child IOs and 27 child iov entries.
2441 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2442 : : * Parent IO iov index start from 16 and block offset start from 24
2443 : : */
2444 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2445 : 12 : uint32_t j = i * 4 + 16;
2446 : 12 : uint32_t offset = i * 6 + 24;
2447 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2448 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2449 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2450 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2451 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2452 : :
2453 : : /* Child io must be a multiple of blocklen
2454 : : * iov[j + 2] must be split. If the third entry is also added,
2455 : : * the multiple of blocklen cannot be guaranteed. But it still
2456 : : * occupies one iov entry of the parent child iov.
2457 : : */
2458 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2459 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2460 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2461 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2462 : :
2463 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2464 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2465 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2466 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2467 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2468 : : }
2469 : :
2470 : : /* The 22nd child IO, child iov position at 30 */
2471 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2472 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2473 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2474 : :
2475 : : /* The third round */
2476 : : /* Here is the 23rd child IO and child iovpos is 0 */
2477 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2478 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2479 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2480 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2481 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2482 : :
2483 : : /* The 24th child IO */
2484 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2485 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2486 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2487 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2488 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2489 : :
2490 : : /* The 25th child IO */
2491 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2492 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2493 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2494 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2495 : :
2496 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2497 : : 50, io_done, NULL);
2498 : 4 : CU_ASSERT(rc == 0);
2499 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2500 : :
2501 : : /* Parent IO supports up to 32 child iovs, so it is calculated that
2502 : : * a maximum of 11 IOs can be split at a time, and the
2503 : : * splitting will continue after the first batch is over.
2504 : : */
2505 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2506 : 4 : stub_complete_io(11);
2507 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2508 : :
2509 : : /* The 2nd round */
2510 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2511 : 4 : stub_complete_io(11);
2512 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2513 : :
2514 : : /* The last round */
2515 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2516 : 4 : stub_complete_io(3);
2517 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2518 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2519 : :
2520 : : /* Test a WRITE_ZEROES. This should also not be split. */
2521 : 4 : bdev->max_segment_size = 512;
2522 : 4 : bdev->max_num_segments = 1;
2523 : 4 : g_io_done = false;
2524 : :
2525 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2526 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2527 : :
2528 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2529 : 4 : CU_ASSERT(rc == 0);
2530 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2531 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2532 : 4 : stub_complete_io(1);
2533 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2534 : :
2535 : : /* Test an UNMAP. This should also not be split. */
2536 : 4 : g_io_done = false;
2537 : :
2538 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2539 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2540 : :
2541 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2542 : 4 : CU_ASSERT(rc == 0);
2543 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2544 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2545 : 4 : stub_complete_io(1);
2546 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2547 : :
2548 : : /* Test a FLUSH. This should also not be split. */
2549 : 4 : g_io_done = false;
2550 : :
2551 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2552 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2553 : :
2554 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
2555 : 4 : CU_ASSERT(rc == 0);
2556 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2557 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2558 : 4 : stub_complete_io(1);
2559 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2560 : :
2561 : : /* Test a COPY. This should also not be split. */
2562 : 4 : g_io_done = false;
2563 : :
2564 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2565 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2566 : :
2567 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2568 : 4 : CU_ASSERT(rc == 0);
2569 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2570 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2571 : 4 : stub_complete_io(1);
2572 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2573 : :
2574 : : /* Test that IOs are split on max_rw_size */
2575 : 4 : bdev->max_rw_size = 2;
2576 : 4 : bdev->max_segment_size = 0;
2577 : 4 : bdev->max_num_segments = 0;
2578 : 4 : g_io_done = false;
2579 : :
2580 : : /* 5 blocks in a contiguous buffer */
2581 : 4 : iov[0].iov_base = (void *)0x10000;
2582 : 4 : iov[0].iov_len = 5 * 512;
2583 : :
2584 : : /* First: offset=0, num_blocks=2 */
2585 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2586 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2587 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2588 : : /* Second: offset=2, num_blocks=2 */
2589 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
2590 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
2591 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2592 : : /* Third: offset=4, num_blocks=1 */
2593 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2594 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
2595 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2596 : :
2597 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
2598 : 4 : CU_ASSERT(rc == 0);
2599 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2600 : :
2601 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2602 : 4 : stub_complete_io(3);
2603 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2604 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2605 : :
2606 : : /* Check splitting on both max_rw_size + max_num_segments */
2607 : 4 : bdev->max_rw_size = 2;
2608 : 4 : bdev->max_num_segments = 2;
2609 : 4 : bdev->max_segment_size = 0;
2610 : 4 : g_io_done = false;
2611 : :
2612 : : /* 5 blocks split across 4 iovs */
2613 : 4 : iov[0].iov_base = (void *)0x10000;
2614 : 4 : iov[0].iov_len = 3 * 512;
2615 : 4 : iov[1].iov_base = (void *)0x20000;
2616 : 4 : iov[1].iov_len = 256;
2617 : 4 : iov[2].iov_base = (void *)0x30000;
2618 : 4 : iov[2].iov_len = 256;
2619 : 4 : iov[3].iov_base = (void *)0x40000;
2620 : 4 : iov[3].iov_len = 512;
2621 : :
2622 : : /* First: offset=0, num_blocks=2, iovcnt=1 */
2623 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2624 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2625 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2626 : : /* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents submitting
2627 : : * the rest of iov[0] together with iov[1]+iov[2] in one child IO)
2628 : : */
2629 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2630 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2631 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2632 : : /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2633 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2634 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2635 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2636 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2637 : : /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2638 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2639 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2640 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2641 : :
2642 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2643 : 4 : CU_ASSERT(rc == 0);
2644 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2645 : :
2646 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2647 : 4 : stub_complete_io(4);
2648 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2649 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2650 : :
2651 : : /* Check splitting on both max_rw_size + max_segment_size */
2652 : 4 : bdev->max_rw_size = 2;
2653 : 4 : bdev->max_segment_size = 512;
2654 : 4 : bdev->max_num_segments = 0;
2655 : 4 : g_io_done = false;
2656 : :
2657 : : /* 6 blocks in a contiguous buffer */
2658 : 4 : iov[0].iov_base = (void *)0x10000;
2659 : 4 : iov[0].iov_len = 6 * 512;
2660 : :
2661 : : /* We expect 3 IOs each with 2 blocks and 2 iovs */
2662 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2663 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2664 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2665 : 12 : ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2666 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2667 : : }
2668 : :
2669 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2670 : 4 : CU_ASSERT(rc == 0);
2671 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2672 : :
2673 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2674 : 4 : stub_complete_io(3);
2675 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2676 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2677 : :
2678 : : /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2679 : 4 : bdev->max_rw_size = 1;
2680 : 4 : bdev->max_segment_size = 0;
2681 : 4 : bdev->max_num_segments = 0;
2682 : 4 : g_io_done = false;
2683 : :
2684 : : /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2685 : 4 : iov[0].iov_base = (void *)0x10000;
2686 : 4 : iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2687 : :
2688 : : /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
2689 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2690 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2691 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2692 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2693 : : }
2694 : :
2695 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2696 : 4 : CU_ASSERT(rc == 0);
2697 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2698 : :
2699 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2700 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2701 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2702 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2703 : 4 : stub_complete_io(1);
2704 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2705 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2706 : :
2707 : 4 : spdk_put_io_channel(io_ch);
2708 : 4 : spdk_bdev_close(desc);
2709 : 4 : free_bdev(bdev);
2710 : 4 : ut_fini_bdev();
2711 : 4 : }
2712 : :
/*
 * Exercise child I/O splitting when several split criteria are active at
 * once: split_on_optimal_io_boundary combined with max_segment_size and
 * max_num_segments, covering each relative ordering of
 * optimal_io_boundary vs. max_segment_size * max_num_segments.
 * Each sub-case queues the exact child IOs the splitter is expected to
 * submit, issues one parent IO, then checks outstanding/completed counts.
 */
static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	/* Twice the per-child iov capacity, so a parent IO can span more than one split round */
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments
	 * (16 blocks == 512 bytes * 16 segments at 512-byte blocks)
	 */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * Total 2 child IOs.
	 */

	/* The 1st child IO splits the buffer on max_segment_size into multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO likewise splits on max_segment_size */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Read blocks 14..17: crosses the boundary at block 16 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Second case: optimal_io_boundary > max_segment_size * max_num_segments
	 * (16 blocks > 15 * 512 bytes * 1 segment)
	 */
	bdev->max_segment_size = 15 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * The 1st child IO segment size exceeds the max_segment_size,
	 * so the 1st child IO will be split into multiple segment entries.
	 * Then it splits into 2 child IOs because of max_num_segments.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary.
	 * Because optimal_io_boundary > max_segment_size * max_num_segments,
	 * that boundary's data splits into these 2 IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO exists because of the IO boundary at block 16 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Third case: optimal_io_boundary < max_segment_size * max_num_segments
	 * (16 blocks < 17 * 512 bytes * 1 segment)
	 */
	bdev->max_segment_size = 17 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * Child IO does not split further (segment limit is large enough).
	 * Total 2 child IOs.
	 */

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = 6;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 10 * 512;

	/* IO crossing the IO boundary requires split.
	 * The 1st child IO segment size exceeds max_segment_size, and after
	 * splitting on segment_size the number of segments exceeds max_num_segments.
	 * So the 1st boundary's data is split into 2 child IOs.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary.
	 * After splitting on segment size, the segment count exceeds the limit,
	 * so the boundary's data splits into 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO carries the leftover segment entry of the boundary */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case. Each sg entry exceeds max_segment_size
	 * and the IO is also split on the IO boundary.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	/* 20 iov entries of 4 blocks each => 80 blocks total */
	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* IO crossing the IO boundary requires split.
	 * An 80-block length splits into 5 child IOs based on offset and IO boundary.
	 * Each iov entry needs to be split into 2 entries because of max_segment_size.
	 * Total 5 child IOs.
	 */

	/* 4 iov entries fit in one IO boundary and each iov entry splits into 2,
	 * so each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO; 16 child iov entries of the parent IO consumed in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 3rd child IO; 24 child iov entries of the parent IO consumed in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 4th child IO; 32 child iov entries of the parent IO consumed in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 5th child IO: the child iov entry limit is reached, so it is submitted
	 * in the next split round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round: 4 child IOs outstanding */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);

	/* Second split round: the deferred 5th child IO */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
2977 : :
2978 : : static void
2979 : 4 : bdev_io_split_with_io_wait(void)
2980 : : {
2981 : : struct spdk_bdev *bdev;
2982 : 4 : struct spdk_bdev_desc *desc = NULL;
2983 : : struct spdk_io_channel *io_ch;
2984 : : struct spdk_bdev_channel *channel;
2985 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
2986 : 4 : struct spdk_bdev_opts bdev_opts = {};
2987 : 4 : struct iovec iov[3];
2988 : : struct ut_expected_io *expected_io;
2989 : : int rc;
2990 : :
2991 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2992 : 4 : bdev_opts.bdev_io_pool_size = 2;
2993 : 4 : bdev_opts.bdev_io_cache_size = 1;
2994 : 4 : ut_init_bdev(&bdev_opts);
2995 : :
2996 : 4 : bdev = allocate_bdev("bdev0");
2997 : :
2998 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
2999 : 4 : CU_ASSERT(rc == 0);
3000 : 4 : CU_ASSERT(desc != NULL);
3001 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3002 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3003 : 4 : CU_ASSERT(io_ch != NULL);
3004 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
3005 : 4 : mgmt_ch = channel->shared_resource->mgmt_ch;
3006 : :
3007 : 4 : bdev->optimal_io_boundary = 16;
3008 : 4 : bdev->split_on_optimal_io_boundary = true;
3009 : :
3010 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
3011 : 4 : CU_ASSERT(rc == 0);
3012 : :
3013 : : /* Now test that a single-vector command is split correctly.
3014 : : * Offset 14, length 8, payload 0xF000
3015 : : * Child - Offset 14, length 2, payload 0xF000
3016 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
3017 : : *
3018 : : * Set up the expected values before calling spdk_bdev_read_blocks
3019 : : */
3020 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
3021 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
3022 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3023 : :
3024 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
3025 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
3026 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3027 : :
3028 : : /* The following children will be submitted sequentially due to the capacity of
3029 : : * spdk_bdev_io.
3030 : : */
3031 : :
3032 : : /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
3033 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
3034 : 4 : CU_ASSERT(rc == 0);
3035 : 4 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3036 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3037 : :
3038 : : /* Completing the first read I/O will submit the first child */
3039 : 4 : stub_complete_io(1);
3040 : 4 : CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3041 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3042 : :
3043 : : /* Completing the first child will submit the second child */
3044 : 4 : stub_complete_io(1);
3045 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3046 : :
3047 : : /* Complete the second child I/O. This should result in our callback getting
3048 : : * invoked since the parent I/O is now complete.
3049 : : */
3050 : 4 : stub_complete_io(1);
3051 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3052 : :
3053 : : /* Now set up a more complex, multi-vector command that needs to be split,
3054 : : * including splitting iovecs.
3055 : : */
3056 : 4 : iov[0].iov_base = (void *)0x10000;
3057 : 4 : iov[0].iov_len = 512;
3058 : 4 : iov[1].iov_base = (void *)0x20000;
3059 : 4 : iov[1].iov_len = 20 * 512;
3060 : 4 : iov[2].iov_base = (void *)0x30000;
3061 : 4 : iov[2].iov_len = 11 * 512;
3062 : :
3063 : 4 : g_io_done = false;
3064 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
3065 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
3066 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
3067 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3068 : :
3069 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
3070 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
3071 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3072 : :
3073 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
3074 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
3075 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
3076 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3077 : :
3078 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
3079 : 4 : CU_ASSERT(rc == 0);
3080 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3081 : :
3082 : : /* The following children will be submitted sequentially due to the capacity of
3083 : : * spdk_bdev_io.
3084 : : */
3085 : :
3086 : : /* Completing the first child will submit the second child */
3087 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3088 : 4 : stub_complete_io(1);
3089 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3090 : :
3091 : : /* Completing the second child will submit the third child */
3092 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3093 : 4 : stub_complete_io(1);
3094 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3095 : :
3096 : : /* Completing the third child will result in our callback getting invoked
3097 : : * since the parent I/O is now complete.
3098 : : */
3099 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3100 : 4 : stub_complete_io(1);
3101 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3102 : :
3103 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
3104 : :
3105 : 4 : spdk_put_io_channel(io_ch);
3106 : 4 : spdk_bdev_close(desc);
3107 : 4 : free_bdev(bdev);
3108 : 4 : ut_fini_bdev();
3109 : 4 : }
3110 : :
3111 : : static void
3112 : 4 : bdev_io_write_unit_split_test(void)
3113 : : {
3114 : : struct spdk_bdev *bdev;
3115 : 4 : struct spdk_bdev_desc *desc = NULL;
3116 : : struct spdk_io_channel *io_ch;
3117 : 4 : struct spdk_bdev_opts bdev_opts = {};
3118 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4];
3119 : : struct ut_expected_io *expected_io;
3120 : : uint64_t i;
3121 : : int rc;
3122 : :
3123 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3124 : 4 : bdev_opts.bdev_io_pool_size = 512;
3125 : 4 : bdev_opts.bdev_io_cache_size = 64;
3126 : 4 : ut_init_bdev(&bdev_opts);
3127 : :
3128 : 4 : bdev = allocate_bdev("bdev0");
3129 : :
3130 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
3131 : 4 : CU_ASSERT(rc == 0);
3132 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
3133 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3134 : 4 : CU_ASSERT(io_ch != NULL);
3135 : :
3136 : : /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */
3137 : 4 : bdev->write_unit_size = 32;
3138 : 4 : bdev->split_on_write_unit = true;
3139 : 4 : g_io_done = false;
3140 : :
3141 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1);
3142 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512);
3143 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3144 : :
3145 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1);
3146 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512);
3147 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3148 : :
3149 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3150 : 4 : CU_ASSERT(rc == 0);
3151 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3152 : :
3153 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3154 : 4 : stub_complete_io(2);
3155 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3156 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3157 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3158 : :
3159 : : /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split
3160 : : * based on write_unit_size, not optimal_io_boundary */
3161 : 4 : bdev->split_on_optimal_io_boundary = true;
3162 : 4 : bdev->optimal_io_boundary = 16;
3163 : 4 : g_io_done = false;
3164 : :
3165 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3166 : 4 : CU_ASSERT(rc == 0);
3167 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3168 : :
3169 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3170 : 4 : stub_complete_io(2);
3171 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3172 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3173 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3174 : :
3175 : : /* Write I/O should fail if it is smaller than write_unit_size */
3176 : 4 : g_io_done = false;
3177 : :
3178 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL);
3179 : 4 : CU_ASSERT(rc == 0);
3180 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3181 : :
3182 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3183 : 4 : poll_threads();
3184 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3185 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3186 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3187 : :
3188 : : /* Same for I/O not aligned to write_unit_size */
3189 : 4 : g_io_done = false;
3190 : :
3191 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL);
3192 : 4 : CU_ASSERT(rc == 0);
3193 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3194 : :
3195 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3196 : 4 : poll_threads();
3197 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3198 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3199 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3200 : :
3201 : : /* Write should fail if it needs to be split but there are not enough iovs to submit
3202 : : * an entire write unit */
3203 : 4 : bdev->write_unit_size = SPDK_COUNTOF(iov) / 2;
3204 : 4 : g_io_done = false;
3205 : :
3206 [ + + ]: 516 : for (i = 0; i < SPDK_COUNTOF(iov); i++) {
3207 : 512 : iov[i].iov_base = (void *)(0x1000 + 512 * i);
3208 : 512 : iov[i].iov_len = 512;
3209 : : }
3210 : :
3211 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov),
3212 : : io_done, NULL);
3213 : 4 : CU_ASSERT(rc == 0);
3214 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3215 : :
3216 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3217 : 4 : poll_threads();
3218 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3219 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3220 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3221 : :
3222 : 4 : spdk_put_io_channel(io_ch);
3223 : 4 : spdk_bdev_close(desc);
3224 : 4 : free_bdev(bdev);
3225 : 4 : ut_fini_bdev();
3226 : 4 : }
3227 : :
3228 : : static void
3229 : 4 : bdev_io_alignment(void)
3230 : : {
3231 : : struct spdk_bdev *bdev;
3232 : 4 : struct spdk_bdev_desc *desc = NULL;
3233 : : struct spdk_io_channel *io_ch;
3234 : 4 : struct spdk_bdev_opts bdev_opts = {};
3235 : : int rc;
3236 : 4 : void *buf = NULL;
3237 : 4 : struct iovec iovs[2];
3238 : : int iovcnt;
3239 : : uint64_t alignment;
3240 : :
3241 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3242 : 4 : bdev_opts.bdev_io_pool_size = 20;
3243 : 4 : bdev_opts.bdev_io_cache_size = 2;
3244 : 4 : ut_init_bdev(&bdev_opts);
3245 : :
3246 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3247 : 4 : bdev = allocate_bdev("bdev0");
3248 : :
3249 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3250 : 4 : CU_ASSERT(rc == 0);
3251 : 4 : CU_ASSERT(desc != NULL);
3252 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3253 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3254 : 4 : CU_ASSERT(io_ch != NULL);
3255 : :
3256 : : /* Create aligned buffer */
3257 : 4 : rc = posix_memalign(&buf, 4096, 8192);
3258 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3259 : :
3260 : : /* Pass aligned single buffer with no alignment required */
3261 : 4 : alignment = 1;
3262 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3263 : :
3264 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3265 : 4 : CU_ASSERT(rc == 0);
3266 : 4 : stub_complete_io(1);
3267 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3268 : : alignment));
3269 : :
3270 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3271 : 4 : CU_ASSERT(rc == 0);
3272 : 4 : stub_complete_io(1);
3273 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3274 : : alignment));
3275 : :
3276 : : /* Pass unaligned single buffer with no alignment required */
3277 : 4 : alignment = 1;
3278 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3279 : :
3280 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3281 : 4 : CU_ASSERT(rc == 0);
3282 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3283 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3284 : 4 : stub_complete_io(1);
3285 : :
3286 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3287 : 4 : CU_ASSERT(rc == 0);
3288 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3289 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3290 : 4 : stub_complete_io(1);
3291 : :
3292 : : /* Pass unaligned single buffer with 512 alignment required */
3293 : 4 : alignment = 512;
3294 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3295 : :
3296 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3297 : 4 : CU_ASSERT(rc == 0);
3298 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3299 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3300 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3301 : : alignment));
3302 : 4 : stub_complete_io(1);
3303 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3304 : :
3305 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3306 : 4 : CU_ASSERT(rc == 0);
3307 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3308 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3309 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3310 : : alignment));
3311 : 4 : stub_complete_io(1);
3312 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3313 : :
3314 : : /* Pass unaligned single buffer with 4096 alignment required */
3315 : 4 : alignment = 4096;
3316 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3317 : :
3318 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3319 : 4 : CU_ASSERT(rc == 0);
3320 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3321 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3322 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3323 : : alignment));
3324 : 4 : stub_complete_io(1);
3325 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3326 : :
3327 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3328 : 4 : CU_ASSERT(rc == 0);
3329 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3330 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3331 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3332 : : alignment));
3333 : 4 : stub_complete_io(1);
3334 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3335 : :
3336 : : /* Pass aligned iovs with no alignment required */
3337 : 4 : alignment = 1;
3338 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3339 : :
3340 : 4 : iovcnt = 1;
3341 : 4 : iovs[0].iov_base = buf;
3342 : 4 : iovs[0].iov_len = 512;
3343 : :
3344 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3345 : 4 : CU_ASSERT(rc == 0);
3346 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3347 : 4 : stub_complete_io(1);
3348 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3349 : :
3350 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3351 : 4 : CU_ASSERT(rc == 0);
3352 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3353 : 4 : stub_complete_io(1);
3354 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3355 : :
3356 : : /* Pass unaligned iovs with no alignment required */
3357 : 4 : alignment = 1;
3358 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3359 : :
3360 : 4 : iovcnt = 2;
3361 : 4 : iovs[0].iov_base = buf + 16;
3362 : 4 : iovs[0].iov_len = 256;
3363 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3364 : 4 : iovs[1].iov_len = 256;
3365 : :
3366 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3367 : 4 : CU_ASSERT(rc == 0);
3368 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3369 : 4 : stub_complete_io(1);
3370 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3371 : :
3372 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3373 : 4 : CU_ASSERT(rc == 0);
3374 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3375 : 4 : stub_complete_io(1);
3376 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3377 : :
3378 : : /* Pass unaligned iov with 2048 alignment required */
3379 : 4 : alignment = 2048;
3380 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3381 : :
3382 : 4 : iovcnt = 2;
3383 : 4 : iovs[0].iov_base = buf + 16;
3384 : 4 : iovs[0].iov_len = 256;
3385 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3386 : 4 : iovs[1].iov_len = 256;
3387 : :
3388 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3389 : 4 : CU_ASSERT(rc == 0);
3390 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
3391 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3392 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3393 : : alignment));
3394 : 4 : stub_complete_io(1);
3395 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3396 : :
3397 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3398 : 4 : CU_ASSERT(rc == 0);
3399 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
3400 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3401 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3402 : : alignment));
3403 : 4 : stub_complete_io(1);
3404 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3405 : :
3406 : : /* Pass iov without allocated buffer without alignment required */
3407 : 4 : alignment = 1;
3408 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3409 : :
3410 : 4 : iovcnt = 1;
3411 : 4 : iovs[0].iov_base = NULL;
3412 : 4 : iovs[0].iov_len = 0;
3413 : :
3414 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3415 : 4 : CU_ASSERT(rc == 0);
3416 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3417 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3418 : : alignment));
3419 : 4 : stub_complete_io(1);
3420 : :
3421 : : /* Pass iov without allocated buffer with 1024 alignment required */
3422 : 4 : alignment = 1024;
3423 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3424 : :
3425 : 4 : iovcnt = 1;
3426 : 4 : iovs[0].iov_base = NULL;
3427 : 4 : iovs[0].iov_len = 0;
3428 : :
3429 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3430 : 4 : CU_ASSERT(rc == 0);
3431 : 4 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3432 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3433 : : alignment));
3434 : 4 : stub_complete_io(1);
3435 : :
3436 : 4 : spdk_put_io_channel(io_ch);
3437 : 4 : spdk_bdev_close(desc);
3438 : 4 : free_bdev(bdev);
3439 : 4 : fn_table.submit_request = stub_submit_request;
3440 : 4 : ut_fini_bdev();
3441 : :
3442 : 4 : free(buf);
3443 : 4 : }
3444 : :
3445 : : static void
3446 : 4 : bdev_io_alignment_with_boundary(void)
3447 : : {
3448 : : struct spdk_bdev *bdev;
3449 : 4 : struct spdk_bdev_desc *desc = NULL;
3450 : : struct spdk_io_channel *io_ch;
3451 : 4 : struct spdk_bdev_opts bdev_opts = {};
3452 : : int rc;
3453 : 4 : void *buf = NULL;
3454 : 4 : struct iovec iovs[2];
3455 : : int iovcnt;
3456 : : uint64_t alignment;
3457 : :
3458 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3459 : 4 : bdev_opts.bdev_io_pool_size = 20;
3460 : 4 : bdev_opts.bdev_io_cache_size = 2;
3461 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
3462 : 4 : ut_init_bdev(&bdev_opts);
3463 : :
3464 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3465 : 4 : bdev = allocate_bdev("bdev0");
3466 : :
3467 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3468 : 4 : CU_ASSERT(rc == 0);
3469 : 4 : CU_ASSERT(desc != NULL);
3470 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3471 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3472 : 4 : CU_ASSERT(io_ch != NULL);
3473 : :
3474 : : /* Create aligned buffer */
3475 : 4 : rc = posix_memalign(&buf, 4096, 131072);
3476 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3477 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3478 : :
3479 : : /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
3480 : 4 : alignment = 512;
3481 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3482 : 4 : bdev->optimal_io_boundary = 2;
3483 : 4 : bdev->split_on_optimal_io_boundary = true;
3484 : :
3485 : 4 : iovcnt = 1;
3486 : 4 : iovs[0].iov_base = NULL;
3487 : 4 : iovs[0].iov_len = 512 * 3;
3488 : :
3489 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3490 : 4 : CU_ASSERT(rc == 0);
3491 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3492 : 4 : stub_complete_io(2);
3493 : :
3494 : : /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
3495 : 4 : alignment = 512;
3496 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3497 : 4 : bdev->optimal_io_boundary = 16;
3498 : 4 : bdev->split_on_optimal_io_boundary = true;
3499 : :
3500 : 4 : iovcnt = 1;
3501 : 4 : iovs[0].iov_base = NULL;
3502 : 4 : iovs[0].iov_len = 512 * 16;
3503 : :
3504 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
3505 : 4 : CU_ASSERT(rc == 0);
3506 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3507 : 4 : stub_complete_io(2);
3508 : :
3509 : : /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
3510 : 4 : alignment = 512;
3511 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3512 : 4 : bdev->optimal_io_boundary = 128;
3513 : 4 : bdev->split_on_optimal_io_boundary = true;
3514 : :
3515 : 4 : iovcnt = 1;
3516 : 4 : iovs[0].iov_base = buf + 16;
3517 : 4 : iovs[0].iov_len = 512 * 160;
3518 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3519 : 4 : CU_ASSERT(rc == 0);
3520 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3521 : 4 : stub_complete_io(2);
3522 : :
3523 : : /* 512 * 3 with 2 IO boundary */
3524 : 4 : alignment = 512;
3525 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3526 : 4 : bdev->optimal_io_boundary = 2;
3527 : 4 : bdev->split_on_optimal_io_boundary = true;
3528 : :
3529 : 4 : iovcnt = 2;
3530 : 4 : iovs[0].iov_base = buf + 16;
3531 : 4 : iovs[0].iov_len = 512;
3532 : 4 : iovs[1].iov_base = buf + 16 + 512 + 32;
3533 : 4 : iovs[1].iov_len = 1024;
3534 : :
3535 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3536 : 4 : CU_ASSERT(rc == 0);
3537 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3538 : 4 : stub_complete_io(2);
3539 : :
3540 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3541 : 4 : CU_ASSERT(rc == 0);
3542 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3543 : 4 : stub_complete_io(2);
3544 : :
3545 : : /* 512 * 64 with 32 IO boundary */
3546 : 4 : bdev->optimal_io_boundary = 32;
3547 : 4 : iovcnt = 2;
3548 : 4 : iovs[0].iov_base = buf + 16;
3549 : 4 : iovs[0].iov_len = 16384;
3550 : 4 : iovs[1].iov_base = buf + 16 + 16384 + 32;
3551 : 4 : iovs[1].iov_len = 16384;
3552 : :
3553 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3554 : 4 : CU_ASSERT(rc == 0);
3555 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3556 : 4 : stub_complete_io(3);
3557 : :
3558 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3559 : 4 : CU_ASSERT(rc == 0);
3560 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3561 : 4 : stub_complete_io(3);
3562 : :
3563 : : /* 512 * 160 with 32 IO boundary */
3564 : 4 : iovcnt = 1;
3565 : 4 : iovs[0].iov_base = buf + 16;
3566 : 4 : iovs[0].iov_len = 16384 + 65536;
3567 : :
3568 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3569 : 4 : CU_ASSERT(rc == 0);
3570 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
3571 : 4 : stub_complete_io(6);
3572 : :
3573 : 4 : spdk_put_io_channel(io_ch);
3574 : 4 : spdk_bdev_close(desc);
3575 : 4 : free_bdev(bdev);
3576 : 4 : fn_table.submit_request = stub_submit_request;
3577 : 4 : ut_fini_bdev();
3578 : :
3579 : 4 : free(buf);
3580 : 4 : }
3581 : :
3582 : : static void
3583 : 8 : histogram_status_cb(void *cb_arg, int status)
3584 : : {
3585 : 8 : g_status = status;
3586 : 8 : }
3587 : :
3588 : : static void
3589 : 12 : histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3590 : : {
3591 : 12 : g_status = status;
3592 : 12 : g_histogram = histogram;
3593 : 12 : }
3594 : :
3595 : : static void
3596 : 89088 : histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
3597 : : uint64_t total, uint64_t so_far)
3598 : : {
3599 : 89088 : g_count += count;
3600 : 89088 : }
3601 : :
3602 : : static void
3603 : 8 : histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3604 : : {
3605 : 8 : spdk_histogram_data_fn cb_fn = cb_arg;
3606 : :
3607 : 8 : g_status = status;
3608 : :
3609 [ + + ]: 8 : if (status == 0) {
3610 : 4 : spdk_histogram_data_iterate(histogram, cb_fn, NULL);
3611 : : }
3612 : 8 : }
3613 : :
3614 : : static void
3615 : 4 : bdev_histograms(void)
3616 : : {
3617 : : struct spdk_bdev *bdev;
3618 : 4 : struct spdk_bdev_desc *desc = NULL;
3619 : : struct spdk_io_channel *ch;
3620 : : struct spdk_histogram_data *histogram;
3621 : 4 : uint8_t buf[4096];
3622 : : int rc;
3623 : :
3624 : 4 : ut_init_bdev(NULL);
3625 : :
3626 : 4 : bdev = allocate_bdev("bdev");
3627 : :
3628 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
3629 : 4 : CU_ASSERT(rc == 0);
3630 : 4 : CU_ASSERT(desc != NULL);
3631 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3632 : :
3633 : 4 : ch = spdk_bdev_get_io_channel(desc);
3634 : 4 : CU_ASSERT(ch != NULL);
3635 : :
3636 : : /* Enable histogram */
3637 : 4 : g_status = -1;
3638 : 4 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
3639 : 4 : poll_threads();
3640 : 4 : CU_ASSERT(g_status == 0);
3641 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3642 : :
3643 : : /* Allocate histogram */
3644 : 4 : histogram = spdk_histogram_data_alloc();
3645 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(histogram != NULL);
3646 : :
3647 : : /* Check if histogram is zeroed */
3648 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3649 : 4 : poll_threads();
3650 : 4 : CU_ASSERT(g_status == 0);
3651 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3652 : :
3653 : 4 : g_count = 0;
3654 : 4 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3655 : :
3656 : 4 : CU_ASSERT(g_count == 0);
3657 : :
3658 : 4 : rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3659 : 4 : CU_ASSERT(rc == 0);
3660 : :
3661 : 4 : spdk_delay_us(10);
3662 : 4 : stub_complete_io(1);
3663 : 4 : poll_threads();
3664 : :
3665 : 4 : rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3666 : 4 : CU_ASSERT(rc == 0);
3667 : :
3668 : 4 : spdk_delay_us(10);
3669 : 4 : stub_complete_io(1);
3670 : 4 : poll_threads();
3671 : :
3672 : : /* Check if histogram gathered data from all I/O channels */
3673 : 4 : g_histogram = NULL;
3674 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3675 : 4 : poll_threads();
3676 : 4 : CU_ASSERT(g_status == 0);
3677 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3678 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3679 : :
3680 : 4 : g_count = 0;
3681 : 4 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3682 : 4 : CU_ASSERT(g_count == 2);
3683 : :
3684 : 4 : g_count = 0;
3685 : 4 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
3686 : 4 : CU_ASSERT(g_status == 0);
3687 : 4 : CU_ASSERT(g_count == 2);
3688 : :
3689 : : /* Disable histogram */
3690 : 4 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
3691 : 4 : poll_threads();
3692 : 4 : CU_ASSERT(g_status == 0);
3693 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == false);
3694 : :
3695 : : /* Try to run histogram commands on disabled bdev */
3696 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3697 : 4 : poll_threads();
3698 : 4 : CU_ASSERT(g_status == -EFAULT);
3699 : :
3700 : 4 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
3701 : 4 : CU_ASSERT(g_status == -EFAULT);
3702 : :
3703 : 4 : spdk_histogram_data_free(histogram);
3704 : 4 : spdk_put_io_channel(ch);
3705 : 4 : spdk_bdev_close(desc);
3706 : 4 : free_bdev(bdev);
3707 : 4 : ut_fini_bdev();
3708 : 4 : }
3709 : :
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/*
	 * Exercise spdk_bdev_comparev_blocks()/spdk_bdev_compare_blocks() for both
	 * the success and the miscompare outcome.
	 *
	 * When 'emulated' is true, native COMPARE support is disabled on the stub
	 * bdev (see g_io_types_supported below), so the bdev layer satisfies the
	 * compare by issuing a READ and comparing in software; the I/O type seen by
	 * the stub backend is then READ instead of COMPARE.
	 */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	/* NOTE(review): submit_request is also assigned again after opening the
	 * descriptor below; the duplication looks redundant — confirm before removing. */
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	/* 1. successful comparev: stub "reads back" aa_buf, which matches compare_iov */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare comparev: stub returns bb_buf, which differs from aa_buf */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 3. successful compare: same as case 1 but via the single-buffer API */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare compare: single-buffer API, mismatching backing data */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Tear down and restore the global stub state for subsequent tests */
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3822 : :
static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/*
	 * Exercise compare with per-block metadata, in both layouts:
	 * interleaved (md follows each 512-byte data block) and separate
	 * (data and md in distinct buffers). As in _bdev_compare(), when
	 * 'emulated' is true native COMPARE is disabled and the bdev layer
	 * performs the compare via a READ.
	 */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

	/* interleaved md & data: blocklen covers 512 data bytes + 8 md bytes */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved: only the last block's md bytes differ */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers: md_len stays 8, blocklen is data only */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Tear down and restore global stub state for the next test */
	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3977 : :
/* Compare tests with native COMPARE support enabled on the stub bdev. */
static void
bdev_compare(void)
{
	_bdev_compare(false);
	_bdev_compare_with_md(false);
}
3984 : :
/* Compare tests with COMPARE disabled, forcing READ-based emulation. */
static void
bdev_compare_emulated(void)
{
	_bdev_compare(true);
	_bdev_compare_with_md(true);
}
3991 : :
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	/*
	 * Exercise spdk_bdev_comparev_and_writev_blocks() with COMPARE disabled,
	 * so the operation is emulated as a READ + in-software compare, followed
	 * by a WRITE only when the compare succeeds. The bdev layer locks the
	 * LBA range for the duration; the poll_threads() calls below drive the
	 * asynchronous lock/unlock steps.
	 */
	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	/* Success path: expect a READ (the emulated compare) followed by a WRITE */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	/* Only the read has completed; the write is still outstanding */
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* The write payload must have reached the stub's write buffer */
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare: only a READ is expected; no WRITE should be issued */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	/* Confirm no write was submitted after the miscompare */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Tear down and restore global stub state */
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
4098 : :
4099 : : static void
4100 : 4 : bdev_write_zeroes(void)
4101 : : {
4102 : : struct spdk_bdev *bdev;
4103 : 4 : struct spdk_bdev_desc *desc = NULL;
4104 : : struct spdk_io_channel *ioch;
4105 : : struct ut_expected_io *expected_io;
4106 : : uint64_t offset, num_io_blocks, num_blocks;
4107 : : uint32_t num_completed, num_requests;
4108 : : int rc;
4109 : :
4110 : 4 : ut_init_bdev(NULL);
4111 : 4 : bdev = allocate_bdev("bdev");
4112 : :
4113 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
4114 : 4 : CU_ASSERT_EQUAL(rc, 0);
4115 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4116 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4117 : 4 : ioch = spdk_bdev_get_io_channel(desc);
4118 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
4119 : :
4120 : 4 : fn_table.submit_request = stub_submit_request;
4121 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
4122 : :
4123 : : /* First test that if the bdev supports write_zeroes, the request won't be split */
4124 : 4 : bdev->md_len = 0;
4125 : 4 : bdev->blocklen = 4096;
4126 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4127 : :
4128 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
4129 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4130 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4131 : 4 : CU_ASSERT_EQUAL(rc, 0);
4132 : 4 : num_completed = stub_complete_io(1);
4133 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
4134 : :
4135 : : /* Check that if write zeroes is not supported it'll be replaced by regular writes */
4136 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
4137 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4138 [ - + ]: 4 : num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
4139 : 4 : num_requests = 2;
4140 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
4141 : :
4142 [ + + ]: 12 : for (offset = 0; offset < num_requests; ++offset) {
4143 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4144 : : offset * num_io_blocks, num_io_blocks, 0);
4145 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4146 : : }
4147 : :
4148 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4149 : 4 : CU_ASSERT_EQUAL(rc, 0);
4150 : 4 : num_completed = stub_complete_io(num_requests);
4151 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4152 : :
4153 : : /* Check that the splitting is correct if bdev has interleaved metadata */
4154 : 4 : bdev->md_interleave = true;
4155 : 4 : bdev->md_len = 64;
4156 : 4 : bdev->blocklen = 4096 + 64;
4157 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4158 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4159 : :
4160 : 4 : num_requests = offset = 0;
4161 [ + + ]: 12 : while (offset < num_blocks) {
4162 [ - + + + : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
- + ]
4163 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4164 : : offset, num_io_blocks, 0);
4165 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4166 : 8 : offset += num_io_blocks;
4167 : 8 : num_requests++;
4168 : : }
4169 : :
4170 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4171 : 4 : CU_ASSERT_EQUAL(rc, 0);
4172 : 4 : num_completed = stub_complete_io(num_requests);
4173 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4174 : 4 : num_completed = stub_complete_io(num_requests);
4175 [ - + ]: 4 : assert(num_completed == 0);
4176 : :
4177 : : /* Check the the same for separate metadata buffer */
4178 : 4 : bdev->md_interleave = false;
4179 : 4 : bdev->md_len = 64;
4180 : 4 : bdev->blocklen = 4096;
4181 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4182 : :
4183 : 4 : num_requests = offset = 0;
4184 [ + + ]: 12 : while (offset < num_blocks) {
4185 [ - + + - : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
- + ]
4186 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4187 : : offset, num_io_blocks, 0);
4188 : 8 : expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
4189 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4190 : 8 : offset += num_io_blocks;
4191 : 8 : num_requests++;
4192 : : }
4193 : :
4194 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4195 : 4 : CU_ASSERT_EQUAL(rc, 0);
4196 : 4 : num_completed = stub_complete_io(num_requests);
4197 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4198 : :
4199 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
4200 : 4 : spdk_put_io_channel(ioch);
4201 : 4 : spdk_bdev_close(desc);
4202 : 4 : free_bdev(bdev);
4203 : 4 : ut_fini_bdev();
4204 : 4 : }
4205 : :
static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	/*
	 * Exercise the zcopy write path: spdk_bdev_zcopy_start() with
	 * populate=false (no need to read existing data) hands the caller
	 * the backend's buffer, then spdk_bdev_zcopy_end() with commit=true
	 * flushes the written data.
	 */
	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the read-side globals so we can verify they stay untouched */
	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy read buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4287 : :
static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	/*
	 * Exercise the zcopy read path: spdk_bdev_zcopy_start() with
	 * populate=true exposes the backend's data buffer to the caller, then
	 * spdk_bdev_zcopy_end() with commit=false releases it without writing.
	 * Mirror image of bdev_zcopy_write().
	 */
	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the write-side globals so we can verify they stay untouched */
	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy write buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4371 : :
static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	/*
	 * Verify that opening a bdev fails with -ENODEV once its unregister has
	 * started, even while an earlier descriptor is still open.
	 */
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);
	/* Bdev unregister is handled asynchronously. Poll thread to complete. */
	poll_threads();

	/* New opens must be rejected while the bdev is being removed */
	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}
4397 : :
static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	/*
	 * Verify that closing a descriptor while the remove event is still in
	 * flight suppresses the event callback but still lets the unregister
	 * callback complete.
	 */
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}
4434 : :
static void
bdev_open_ext_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	/*
	 * Verify spdk_bdev_open_ext(): a NULL event callback is rejected with
	 * -EINVAL, and on hot-remove every open descriptor receives
	 * SPDK_BDEV_EVENT_REMOVE through its own callback.
	 */
	bdev = allocate_bdev("bdev");

	/* Opening without an event callback must fail */
	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}
4468 : :
4469 : : static void
4470 : 4 : bdev_open_ext_unregister(void)
4471 : : {
4472 : : struct spdk_bdev *bdev;
4473 : 4 : struct spdk_bdev_desc *desc1 = NULL;
4474 : 4 : struct spdk_bdev_desc *desc2 = NULL;
4475 : 4 : struct spdk_bdev_desc *desc3 = NULL;
4476 : 4 : struct spdk_bdev_desc *desc4 = NULL;
4477 : 4 : int rc = 0;
4478 : :
4479 : 4 : bdev = allocate_bdev("bdev");
4480 : :
4481 : 4 : rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
4482 : 4 : CU_ASSERT_EQUAL(rc, -EINVAL);
4483 : :
4484 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
4485 : 4 : CU_ASSERT_EQUAL(rc, 0);
4486 : :
4487 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
4488 : 4 : CU_ASSERT_EQUAL(rc, 0);
4489 : :
4490 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
4491 : 4 : CU_ASSERT_EQUAL(rc, 0);
4492 : :
4493 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
4494 : 4 : CU_ASSERT_EQUAL(rc, 0);
4495 : :
4496 : 4 : g_event_type1 = 0xFF;
4497 : 4 : g_event_type2 = 0xFF;
4498 : 4 : g_event_type3 = 0xFF;
4499 : 4 : g_event_type4 = 0xFF;
4500 : :
4501 : 4 : g_unregister_arg = NULL;
4502 : 4 : g_unregister_rc = -1;
4503 : :
4504 : : /* Simulate hot-unplug by unregistering bdev */
4505 : 4 : spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
4506 : :
4507 : : /*
4508 : : * Unregister is handled asynchronously and event callback
4509 : : * (i.e., above bdev_open_cbN) will be called.
4510 : : * For bdev_open_cb3 and bdev_open_cb4, it is intended to not
4511 : : * close the desc3 and desc4 so that the bdev is not closed.
4512 : : */
4513 : 4 : poll_threads();
4514 : :
4515 : : /* Check if correct events have been triggered in event callback fn */
4516 : 4 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
4517 : 4 : CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
4518 : 4 : CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
4519 : 4 : CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);
4520 : :
4521 : : /* Check that unregister callback is delayed */
4522 : 4 : CU_ASSERT(g_unregister_arg == NULL);
4523 : 4 : CU_ASSERT(g_unregister_rc == -1);
4524 : :
4525 : : /*
4526 : : * Explicitly close desc3. As desc4 is still opened there, the
4527 : : * unergister callback is still delayed to execute.
4528 : : */
4529 : 4 : spdk_bdev_close(desc3);
4530 : 4 : CU_ASSERT(g_unregister_arg == NULL);
4531 : 4 : CU_ASSERT(g_unregister_rc == -1);
4532 : :
4533 : : /*
4534 : : * Explicitly close desc4 to trigger the ongoing bdev unregister
4535 : : * operation after last desc is closed.
4536 : : */
4537 : 4 : spdk_bdev_close(desc4);
4538 : :
4539 : : /* Poll the thread for the async unregister operation */
4540 : 4 : poll_threads();
4541 : :
4542 : : /* Check that unregister callback is executed */
4543 : 4 : CU_ASSERT(g_unregister_arg == (void *)0x12345678);
4544 : 4 : CU_ASSERT(g_unregister_rc == 0);
4545 : :
4546 : 4 : free_bdev(bdev);
4547 : 4 : poll_threads();
4548 : 4 : }
4549 : :
/* Snapshot of the I/O reported by the timeout callback
 * (see bdev_channel_io_timeout_cb), inspected by the asserts in the tests.
 */
struct timeout_io_cb_arg {
	struct iovec iov;	/* payload iovec copied from the timed-out bdev_io */
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_* of the timed-out I/O */
};
4554 : :
4555 : : static int
4556 : 56 : bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
4557 : : {
4558 : : struct spdk_bdev_io *bdev_io;
4559 : 56 : int n = 0;
4560 : :
4561 [ - + ]: 56 : if (!ch) {
4562 : 0 : return -1;
4563 : : }
4564 : :
4565 [ + + ]: 116 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
4566 : 60 : n++;
4567 : : }
4568 : :
4569 : 56 : return n;
4570 : : }
4571 : :
4572 : : static void
4573 : 12 : bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
4574 : : {
4575 : 12 : struct timeout_io_cb_arg *ctx = cb_arg;
4576 : :
4577 : 12 : ctx->type = bdev_io->type;
4578 : 12 : ctx->iov.iov_base = bdev_io->iov.iov_base;
4579 : 12 : ctx->iov.iov_len = bdev_io->iov.iov_len;
4580 : 12 : }
4581 : :
/* Verify per-descriptor I/O timeout handling in three parts:
 * part 1 - io_submitted links user-submitted I/O (including split children and resets),
 * part 2 - spdk_bdev_set_timeout() registers, updates and removes the timeout poller,
 * part 3 - the timeout callback reports exactly the I/O that exceeded the limit.
 */
static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is the part1.
	 * We will check the bdev_ch->io_submitted list
	 * TO make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted IOs including IO that are generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part2
	 * Test the desc timeout poller register
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout; timeout value 0 removes the poller. */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This the part3
	 * We will test to catch timeout IO and check whether the IO is
	 * the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit: 15s elapsed < 30s timeout, cb_arg stays zeroed. */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30 reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child complete in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reach the limit.
	 * Note: the parent I/O is reported - payload 0xF000 and the full
	 * 8-block length, not the child's.
	 */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4723 : :
/* Verify queue-depth sampling control in three parts:
 * part 1 - io_submitted links user-submitted I/O,
 * part 2 - spdk_bdev_set_qd_sampling_period() registers, re-arms and disables
 *          the QD poller (a new period takes effect on the next poll),
 * part 3 - I/O and reset behave with sampling active and the desc closed
 *          while I/O is still outstanding.
 */
static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is the part1.
	 * We will check the bdev_ch->io_submitted list
	 * TO make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is the part2.
	 * Test the bdev's qd poller register
	 */
	/* 1st Successfully set the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd Change the qd sampling period.
	 * The old period stays in effect until the poller picks up new_period.
	 */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd Change the qd sampling period and verify qd_poll_in_progress.
	 * A single poll iteration leaves the QD poll mid-flight.
	 */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th Disable the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* This is the part3.
	 * We will test the submitted IO and reset works
	 * properly with the qd sampling.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	spdk_bdev_set_qd_sampling_period(bdev, 1);
	poll_threads();

	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	poll_threads();

	/* Close the desc while the write and reset are still outstanding. */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);

	/* Complete the submitted IO and reset */
	stub_complete_io(2);
	poll_threads();

	free_bdev(bdev);
	ut_fini_bdev();
}
4826 : :
4827 : : static void
4828 : 4 : lba_range_overlap(void)
4829 : : {
4830 : 4 : struct lba_range r1, r2;
4831 : :
4832 : 4 : r1.offset = 100;
4833 : 4 : r1.length = 50;
4834 : :
4835 : 4 : r2.offset = 0;
4836 : 4 : r2.length = 1;
4837 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4838 : :
4839 : 4 : r2.offset = 0;
4840 : 4 : r2.length = 100;
4841 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4842 : :
4843 : 4 : r2.offset = 0;
4844 : 4 : r2.length = 110;
4845 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4846 : :
4847 : 4 : r2.offset = 100;
4848 : 4 : r2.length = 10;
4849 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4850 : :
4851 : 4 : r2.offset = 110;
4852 : 4 : r2.length = 20;
4853 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4854 : :
4855 : 4 : r2.offset = 140;
4856 : 4 : r2.length = 150;
4857 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4858 : :
4859 : 4 : r2.offset = 130;
4860 : 4 : r2.length = 200;
4861 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4862 : :
4863 : 4 : r2.offset = 150;
4864 : 4 : r2.length = 100;
4865 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4866 : :
4867 : 4 : r2.offset = 110;
4868 : 4 : r2.length = 0;
4869 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4870 : 4 : }
4871 : :
/* Completion flags set by the lock/unlock callbacks below. */
static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;
4874 : :
/* Completion callback for bdev_lock_lba_range(): record that the lock was granted. */
static void
lock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
4880 : :
/* Completion callback for bdev_unlock_lba_range(): record that the unlock finished. */
static void
unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
4886 : :
4887 : : static void
4888 : 4 : lock_lba_range_check_ranges(void)
4889 : : {
4890 : : struct spdk_bdev *bdev;
4891 : 4 : struct spdk_bdev_desc *desc = NULL;
4892 : : struct spdk_io_channel *io_ch;
4893 : : struct spdk_bdev_channel *channel;
4894 : : struct lba_range *range;
4895 : 4 : int ctx1;
4896 : : int rc;
4897 : :
4898 : 4 : ut_init_bdev(NULL);
4899 : 4 : bdev = allocate_bdev("bdev0");
4900 : :
4901 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4902 : 4 : CU_ASSERT(rc == 0);
4903 : 4 : CU_ASSERT(desc != NULL);
4904 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4905 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4906 : 4 : CU_ASSERT(io_ch != NULL);
4907 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
4908 : :
4909 : 4 : g_lock_lba_range_done = false;
4910 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4911 : 4 : CU_ASSERT(rc == 0);
4912 : 4 : poll_threads();
4913 : :
4914 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
4915 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
4916 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
4917 : 4 : CU_ASSERT(range->offset == 20);
4918 : 4 : CU_ASSERT(range->length == 10);
4919 : 4 : CU_ASSERT(range->owner_ch == channel);
4920 : :
4921 : : /* Unlocks must exactly match a lock. */
4922 : 4 : g_unlock_lba_range_done = false;
4923 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
4924 : 4 : CU_ASSERT(rc == -EINVAL);
4925 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
4926 : :
4927 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
4928 : 4 : CU_ASSERT(rc == 0);
4929 : 4 : spdk_delay_us(100);
4930 : 4 : poll_threads();
4931 : :
4932 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
4933 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4934 : :
4935 : 4 : spdk_put_io_channel(io_ch);
4936 : 4 : spdk_bdev_close(desc);
4937 : 4 : free_bdev(bdev);
4938 : 4 : ut_fini_bdev();
4939 : 4 : }
4940 : :
4941 : : static void
4942 : 4 : lock_lba_range_with_io_outstanding(void)
4943 : : {
4944 : : struct spdk_bdev *bdev;
4945 : 4 : struct spdk_bdev_desc *desc = NULL;
4946 : : struct spdk_io_channel *io_ch;
4947 : : struct spdk_bdev_channel *channel;
4948 : : struct lba_range *range;
4949 : 4 : char buf[4096];
4950 : 4 : int ctx1;
4951 : : int rc;
4952 : :
4953 : 4 : ut_init_bdev(NULL);
4954 : 4 : bdev = allocate_bdev("bdev0");
4955 : :
4956 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4957 : 4 : CU_ASSERT(rc == 0);
4958 : 4 : CU_ASSERT(desc != NULL);
4959 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4960 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4961 : 4 : CU_ASSERT(io_ch != NULL);
4962 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
4963 : :
4964 : 4 : g_io_done = false;
4965 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4966 : 4 : CU_ASSERT(rc == 0);
4967 : :
4968 : 4 : g_lock_lba_range_done = false;
4969 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4970 : 4 : CU_ASSERT(rc == 0);
4971 : 4 : poll_threads();
4972 : :
4973 : : /* The lock should immediately become valid, since there are no outstanding
4974 : : * write I/O.
4975 : : */
4976 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
4977 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
4978 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
4979 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
4980 : 4 : CU_ASSERT(range->offset == 20);
4981 : 4 : CU_ASSERT(range->length == 10);
4982 : 4 : CU_ASSERT(range->owner_ch == channel);
4983 : 4 : CU_ASSERT(range->locked_ctx == &ctx1);
4984 : :
4985 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4986 : 4 : CU_ASSERT(rc == 0);
4987 : 4 : stub_complete_io(1);
4988 : 4 : spdk_delay_us(100);
4989 : 4 : poll_threads();
4990 : :
4991 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4992 : :
4993 : : /* Now try again, but with a write I/O. */
4994 : 4 : g_io_done = false;
4995 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4996 : 4 : CU_ASSERT(rc == 0);
4997 : :
4998 : 4 : g_lock_lba_range_done = false;
4999 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5000 : 4 : CU_ASSERT(rc == 0);
5001 : 4 : poll_threads();
5002 : :
5003 : : /* The lock should not be fully valid yet, since a write I/O is outstanding.
5004 : : * But note that the range should be on the channel's locked_list, to make sure no
5005 : : * new write I/O are started.
5006 : : */
5007 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5008 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5009 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5010 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5011 : 4 : CU_ASSERT(range->offset == 20);
5012 : 4 : CU_ASSERT(range->length == 10);
5013 : :
5014 : : /* Complete the write I/O. This should make the lock valid (checked by confirming
5015 : : * our callback was invoked).
5016 : : */
5017 : 4 : stub_complete_io(1);
5018 : 4 : spdk_delay_us(100);
5019 : 4 : poll_threads();
5020 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5021 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5022 : :
5023 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
5024 : 4 : CU_ASSERT(rc == 0);
5025 : 4 : poll_threads();
5026 : :
5027 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5028 : :
5029 : 4 : spdk_put_io_channel(io_ch);
5030 : 4 : spdk_bdev_close(desc);
5031 : 4 : free_bdev(bdev);
5032 : 4 : ut_fini_bdev();
5033 : 4 : }
5034 : :
/* Verify queuing of overlapping LBA range locks: a lock request that overlaps
 * an active lock waits on bdev->internal.pending_locked_ranges and is granted
 * only after every overlapping active lock has been released.
 */
static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
	 * 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked since it
	 * no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	/* 40-59 is the second entry on the bdev's locked list, after 25-39. */
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);

	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
	 * no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5171 : :
/* Completion callback for spdk_bdev_quiesce(): record that quiesce finished. */
static void
bdev_quiesce_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
5177 : :
/* Completion callback for spdk_bdev_unquiesce(): record that unquiesce finished. */
static void
bdev_unquiesce_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
5183 : :
5184 : : static void
5185 : 4 : bdev_quiesce_done_unquiesce(void *ctx, int status)
5186 : : {
5187 : 4 : struct spdk_bdev *bdev = ctx;
5188 : : int rc;
5189 : :
5190 : 4 : g_lock_lba_range_done = true;
5191 : :
5192 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL);
5193 : 4 : CU_ASSERT(rc == 0);
5194 : 4 : }
5195 : :
5196 : : static void
5197 : 4 : bdev_quiesce(void)
5198 : : {
5199 : : struct spdk_bdev *bdev;
5200 : 4 : struct spdk_bdev_desc *desc = NULL;
5201 : : struct spdk_io_channel *io_ch;
5202 : : struct spdk_bdev_channel *channel;
5203 : : struct lba_range *range;
5204 : : struct spdk_bdev_io *bdev_io;
5205 : 4 : int ctx1;
5206 : : int rc;
5207 : :
5208 : 4 : ut_init_bdev(NULL);
5209 : 4 : bdev = allocate_bdev("bdev0");
5210 : :
5211 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5212 : 4 : CU_ASSERT(rc == 0);
5213 : 4 : CU_ASSERT(desc != NULL);
5214 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5215 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5216 : 4 : CU_ASSERT(io_ch != NULL);
5217 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5218 : :
5219 : 4 : g_lock_lba_range_done = false;
5220 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5221 : 4 : CU_ASSERT(rc == 0);
5222 : 4 : poll_threads();
5223 : :
5224 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5225 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5226 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5227 : 4 : CU_ASSERT(range->offset == 0);
5228 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5229 : 4 : CU_ASSERT(range->owner_ch == NULL);
5230 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5231 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5232 : 4 : CU_ASSERT(range->offset == 0);
5233 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5234 : 4 : CU_ASSERT(range->owner_ch == NULL);
5235 : :
5236 : 4 : g_unlock_lba_range_done = false;
5237 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5238 : 4 : CU_ASSERT(rc == 0);
5239 : 4 : spdk_delay_us(100);
5240 : 4 : poll_threads();
5241 : :
5242 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5243 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5244 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5245 : :
5246 : 4 : g_lock_lba_range_done = false;
5247 : 4 : rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1);
5248 : 4 : CU_ASSERT(rc == 0);
5249 : 4 : poll_threads();
5250 : :
5251 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5252 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5253 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5254 : 4 : CU_ASSERT(range->offset == 20);
5255 : 4 : CU_ASSERT(range->length == 10);
5256 : 4 : CU_ASSERT(range->owner_ch == NULL);
5257 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5258 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5259 : 4 : CU_ASSERT(range->offset == 20);
5260 : 4 : CU_ASSERT(range->length == 10);
5261 : 4 : CU_ASSERT(range->owner_ch == NULL);
5262 : :
5263 : : /* Unlocks must exactly match a lock. */
5264 : 4 : g_unlock_lba_range_done = false;
5265 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1);
5266 : 4 : CU_ASSERT(rc == -EINVAL);
5267 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
5268 : :
5269 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1);
5270 : 4 : CU_ASSERT(rc == 0);
5271 : 4 : spdk_delay_us(100);
5272 : 4 : poll_threads();
5273 : :
5274 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5275 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5276 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5277 : :
5278 : : /* Test unquiesce from quiesce cb */
5279 : 4 : g_lock_lba_range_done = false;
5280 : 4 : g_unlock_lba_range_done = false;
5281 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev);
5282 : 4 : CU_ASSERT(rc == 0);
5283 : 4 : poll_threads();
5284 : :
5285 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5286 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5287 : :
5288 : : /* Test quiesce with read I/O */
5289 : 4 : g_lock_lba_range_done = false;
5290 : 4 : g_unlock_lba_range_done = false;
5291 : 4 : g_io_done = false;
5292 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5293 : 4 : CU_ASSERT(rc == 0);
5294 : :
5295 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5296 : 4 : CU_ASSERT(rc == 0);
5297 : 4 : poll_threads();
5298 : :
5299 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5300 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5301 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5302 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5303 : :
5304 : 4 : stub_complete_io(1);
5305 : 4 : spdk_delay_us(100);
5306 : 4 : poll_threads();
5307 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5308 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5309 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5310 : :
5311 : 4 : g_io_done = false;
5312 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5313 : 4 : CU_ASSERT(rc == 0);
5314 : :
5315 : 4 : bdev_io = TAILQ_FIRST(&channel->io_locked);
5316 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
5317 : 4 : CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20);
5318 : 4 : CU_ASSERT(bdev_io->u.bdev.num_blocks == 1);
5319 : :
5320 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5321 : 4 : CU_ASSERT(rc == 0);
5322 : 4 : spdk_delay_us(100);
5323 : 4 : poll_threads();
5324 : :
5325 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5326 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5327 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5328 : :
5329 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5330 : 4 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
5331 : 4 : poll_threads();
5332 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5333 : :
5334 : 4 : spdk_put_io_channel(io_ch);
5335 : 4 : spdk_bdev_close(desc);
5336 : 4 : free_bdev(bdev);
5337 : 4 : ut_fini_bdev();
5338 : 4 : }
5339 : :
5340 : : static void
5341 : 24 : abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5342 : : {
5343 : 24 : g_abort_done = true;
5344 : 24 : g_abort_status = bdev_io->internal.status;
5345 : 24 : spdk_bdev_free_io(bdev_io);
5346 : 24 : }
5347 : :
5348 : : static void
5349 : 4 : bdev_io_abort(void)
5350 : : {
5351 : : struct spdk_bdev *bdev;
5352 : 4 : struct spdk_bdev_desc *desc = NULL;
5353 : : struct spdk_io_channel *io_ch;
5354 : : struct spdk_bdev_channel *channel;
5355 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
5356 : 4 : struct spdk_bdev_opts bdev_opts = {};
5357 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
5358 : 4 : uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
5359 : : int rc;
5360 : :
5361 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5362 : 4 : bdev_opts.bdev_io_pool_size = 7;
5363 : 4 : bdev_opts.bdev_io_cache_size = 2;
5364 : 4 : ut_init_bdev(&bdev_opts);
5365 : :
5366 : 4 : bdev = allocate_bdev("bdev0");
5367 : :
5368 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5369 : 4 : CU_ASSERT(rc == 0);
5370 : 4 : CU_ASSERT(desc != NULL);
5371 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5372 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5373 : 4 : CU_ASSERT(io_ch != NULL);
5374 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5375 : 4 : mgmt_ch = channel->shared_resource->mgmt_ch;
5376 : :
5377 : 4 : g_abort_done = false;
5378 : :
5379 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
5380 : :
5381 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5382 : 4 : CU_ASSERT(rc == -ENOTSUP);
5383 : :
5384 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
5385 : :
5386 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
5387 : 4 : CU_ASSERT(rc == 0);
5388 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5389 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
5390 : :
5391 : : /* Test the case that the target I/O was successfully aborted. */
5392 : 4 : g_io_done = false;
5393 : :
5394 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5395 : 4 : CU_ASSERT(rc == 0);
5396 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5397 : :
5398 : 4 : g_abort_done = false;
5399 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5400 : :
5401 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5402 : 4 : CU_ASSERT(rc == 0);
5403 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5404 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5405 : 4 : stub_complete_io(1);
5406 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5407 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5408 : :
5409 : : /* Test the case that the target I/O was not aborted because it completed
5410 : : * in the middle of execution of the abort.
5411 : : */
5412 : 4 : g_io_done = false;
5413 : :
5414 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5415 : 4 : CU_ASSERT(rc == 0);
5416 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5417 : :
5418 : 4 : g_abort_done = false;
5419 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5420 : :
5421 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5422 : 4 : CU_ASSERT(rc == 0);
5423 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5424 : :
5425 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5426 : 4 : stub_complete_io(1);
5427 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5428 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5429 : :
5430 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5431 : 4 : stub_complete_io(1);
5432 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5433 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5434 : :
5435 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5436 : :
5437 : 4 : bdev->optimal_io_boundary = 16;
5438 : 4 : bdev->split_on_optimal_io_boundary = true;
5439 : :
5440 : : /* Test that a single-vector command which is split is aborted correctly.
5441 : : * Offset 14, length 8, payload 0xF000
5442 : : * Child - Offset 14, length 2, payload 0xF000
5443 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5444 : : */
5445 : 4 : g_io_done = false;
5446 : :
5447 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
5448 : 4 : CU_ASSERT(rc == 0);
5449 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5450 : :
5451 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5452 : :
5453 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5454 : :
5455 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5456 : 4 : CU_ASSERT(rc == 0);
5457 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5458 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5459 : 4 : stub_complete_io(2);
5460 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5461 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5462 : :
5463 : : /* Test that a multi-vector command that needs to be split by strip and then
5464 : : * needs to be split is aborted correctly. Abort is requested before the second
5465 : : * child I/O was submitted. The parent I/O should complete with failure without
5466 : : * submitting the second child I/O.
5467 : : */
5468 [ + + ]: 260 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
5469 : 256 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
5470 : 256 : iov[i].iov_len = 512;
5471 : : }
5472 : :
5473 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
5474 : 4 : g_io_done = false;
5475 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
5476 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
5477 : 4 : CU_ASSERT(rc == 0);
5478 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5479 : :
5480 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5481 : :
5482 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5483 : :
5484 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5485 : 4 : CU_ASSERT(rc == 0);
5486 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5487 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5488 : 4 : stub_complete_io(1);
5489 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5490 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5491 : :
5492 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5493 : :
5494 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5495 : :
5496 : 4 : bdev->optimal_io_boundary = 16;
5497 : 4 : g_io_done = false;
5498 : :
5499 : : /* Test that a ingle-vector command which is split is aborted correctly.
5500 : : * Differently from the above, the child abort request will be submitted
5501 : : * sequentially due to the capacity of spdk_bdev_io.
5502 : : */
5503 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
5504 : 4 : CU_ASSERT(rc == 0);
5505 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5506 : :
5507 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5508 : :
5509 : 4 : g_abort_done = false;
5510 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5511 : :
5512 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5513 : 4 : CU_ASSERT(rc == 0);
5514 : 4 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
5515 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5516 : :
5517 : 4 : stub_complete_io(1);
5518 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5519 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5520 : 4 : stub_complete_io(3);
5521 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5522 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5523 : :
5524 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5525 : :
5526 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5527 : :
5528 : 4 : spdk_put_io_channel(io_ch);
5529 : 4 : spdk_bdev_close(desc);
5530 : 4 : free_bdev(bdev);
5531 : 4 : ut_fini_bdev();
5532 : 4 : }
5533 : :
5534 : : static void
5535 : 4 : bdev_unmap(void)
5536 : : {
5537 : : struct spdk_bdev *bdev;
5538 : 4 : struct spdk_bdev_desc *desc = NULL;
5539 : : struct spdk_io_channel *ioch;
5540 : : struct spdk_bdev_channel *bdev_ch;
5541 : : struct ut_expected_io *expected_io;
5542 : 4 : struct spdk_bdev_opts bdev_opts = {};
5543 : : uint32_t i, num_outstanding;
5544 : : uint64_t offset, num_blocks, max_unmap_blocks, num_children;
5545 : : int rc;
5546 : :
5547 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5548 : 4 : bdev_opts.bdev_io_pool_size = 512;
5549 : 4 : bdev_opts.bdev_io_cache_size = 64;
5550 : 4 : ut_init_bdev(&bdev_opts);
5551 : :
5552 : 4 : bdev = allocate_bdev("bdev");
5553 : :
5554 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5555 : 4 : CU_ASSERT_EQUAL(rc, 0);
5556 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5557 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5558 : 4 : ioch = spdk_bdev_get_io_channel(desc);
5559 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5560 : 4 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5561 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5562 : :
5563 : 4 : fn_table.submit_request = stub_submit_request;
5564 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5565 : :
5566 : : /* Case 1: First test the request won't be split */
5567 : 4 : num_blocks = 32;
5568 : :
5569 : 4 : g_io_done = false;
5570 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
5571 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5572 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5573 : 4 : CU_ASSERT_EQUAL(rc, 0);
5574 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5575 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5576 : 4 : stub_complete_io(1);
5577 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5578 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5579 : :
5580 : : /* Case 2: Test the split with 2 children requests */
5581 : 4 : bdev->max_unmap = 8;
5582 : 4 : bdev->max_unmap_segments = 2;
5583 : 4 : max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
5584 : 4 : num_blocks = max_unmap_blocks * 2;
5585 : 4 : offset = 0;
5586 : :
5587 : 4 : g_io_done = false;
5588 [ + + ]: 12 : for (i = 0; i < 2; i++) {
5589 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5590 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5591 : 8 : offset += max_unmap_blocks;
5592 : : }
5593 : :
5594 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5595 : 4 : CU_ASSERT_EQUAL(rc, 0);
5596 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5597 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5598 : 4 : stub_complete_io(2);
5599 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5600 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5601 : :
5602 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5603 : 4 : num_children = 15;
5604 : 4 : num_blocks = max_unmap_blocks * num_children;
5605 : 4 : g_io_done = false;
5606 : 4 : offset = 0;
5607 [ + + ]: 64 : for (i = 0; i < num_children; i++) {
5608 : 60 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5609 : 60 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5610 : 60 : offset += max_unmap_blocks;
5611 : : }
5612 : :
5613 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5614 : 4 : CU_ASSERT_EQUAL(rc, 0);
5615 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5616 : :
5617 [ + + ]: 12 : while (num_children > 0) {
5618 : 8 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5619 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5620 : 8 : stub_complete_io(num_outstanding);
5621 : 8 : num_children -= num_outstanding;
5622 : : }
5623 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5624 : :
5625 : 4 : spdk_put_io_channel(ioch);
5626 : 4 : spdk_bdev_close(desc);
5627 : 4 : free_bdev(bdev);
5628 : 4 : ut_fini_bdev();
5629 : 4 : }
5630 : :
5631 : : static void
5632 : 4 : bdev_write_zeroes_split_test(void)
5633 : : {
5634 : : struct spdk_bdev *bdev;
5635 : 4 : struct spdk_bdev_desc *desc = NULL;
5636 : : struct spdk_io_channel *ioch;
5637 : : struct spdk_bdev_channel *bdev_ch;
5638 : : struct ut_expected_io *expected_io;
5639 : 4 : struct spdk_bdev_opts bdev_opts = {};
5640 : : uint32_t i, num_outstanding;
5641 : : uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
5642 : : int rc;
5643 : :
5644 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5645 : 4 : bdev_opts.bdev_io_pool_size = 512;
5646 : 4 : bdev_opts.bdev_io_cache_size = 64;
5647 : 4 : ut_init_bdev(&bdev_opts);
5648 : :
5649 : 4 : bdev = allocate_bdev("bdev");
5650 : :
5651 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5652 : 4 : CU_ASSERT_EQUAL(rc, 0);
5653 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5654 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5655 : 4 : ioch = spdk_bdev_get_io_channel(desc);
5656 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5657 : 4 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5658 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5659 : :
5660 : 4 : fn_table.submit_request = stub_submit_request;
5661 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5662 : :
5663 : : /* Case 1: First test the request won't be split */
5664 : 4 : num_blocks = 32;
5665 : :
5666 : 4 : g_io_done = false;
5667 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
5668 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5669 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5670 : 4 : CU_ASSERT_EQUAL(rc, 0);
5671 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5672 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5673 : 4 : stub_complete_io(1);
5674 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5675 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5676 : :
5677 : : /* Case 2: Test the split with 2 children requests */
5678 : 4 : max_write_zeroes_blocks = 8;
5679 : 4 : bdev->max_write_zeroes = max_write_zeroes_blocks;
5680 : 4 : num_blocks = max_write_zeroes_blocks * 2;
5681 : 4 : offset = 0;
5682 : :
5683 : 4 : g_io_done = false;
5684 [ + + ]: 12 : for (i = 0; i < 2; i++) {
5685 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5686 : : 0);
5687 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5688 : 8 : offset += max_write_zeroes_blocks;
5689 : : }
5690 : :
5691 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5692 : 4 : CU_ASSERT_EQUAL(rc, 0);
5693 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5694 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5695 : 4 : stub_complete_io(2);
5696 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5697 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5698 : :
5699 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5700 : 4 : num_children = 15;
5701 : 4 : num_blocks = max_write_zeroes_blocks * num_children;
5702 : 4 : g_io_done = false;
5703 : 4 : offset = 0;
5704 [ + + ]: 64 : for (i = 0; i < num_children; i++) {
5705 : 60 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5706 : : 0);
5707 : 60 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5708 : 60 : offset += max_write_zeroes_blocks;
5709 : : }
5710 : :
5711 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5712 : 4 : CU_ASSERT_EQUAL(rc, 0);
5713 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5714 : :
5715 [ + + ]: 12 : while (num_children > 0) {
5716 : 8 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5717 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5718 : 8 : stub_complete_io(num_outstanding);
5719 : 8 : num_children -= num_outstanding;
5720 : : }
5721 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5722 : :
5723 : 4 : spdk_put_io_channel(ioch);
5724 : 4 : spdk_bdev_close(desc);
5725 : 4 : free_bdev(bdev);
5726 : 4 : ut_fini_bdev();
5727 : 4 : }
5728 : :
5729 : : static void
5730 : 4 : bdev_set_options_test(void)
5731 : : {
5732 : 4 : struct spdk_bdev_opts bdev_opts = {};
5733 : : int rc;
5734 : :
5735 : : /* Case1: Do not set opts_size */
5736 : 4 : rc = spdk_bdev_set_opts(&bdev_opts);
5737 : 4 : CU_ASSERT(rc == -1);
5738 : 4 : }
5739 : :
5740 : : static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;
5741 : :
5742 : : static int
5743 : 12 : test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
5744 : : int array_size)
5745 : : {
5746 [ + + + + ]: 12 : if (array_size > 0 && domains) {
5747 : 4 : domains[0] = g_bdev_memory_domain;
5748 : : }
5749 : :
5750 : 12 : return 1;
5751 : : }
5752 : :
5753 : : static void
5754 : 4 : bdev_get_memory_domains(void)
5755 : : {
5756 : 4 : struct spdk_bdev_fn_table fn_table = {
5757 : : .get_memory_domains = test_bdev_get_supported_dma_device_types_op
5758 : : };
5759 : 4 : struct spdk_bdev bdev = { .fn_table = &fn_table };
5760 : 4 : struct spdk_memory_domain *domains[2] = {};
5761 : : int rc;
5762 : :
5763 : : /* bdev is NULL */
5764 : 4 : rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
5765 : 4 : CU_ASSERT(rc == -EINVAL);
5766 : :
5767 : : /* domains is NULL */
5768 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
5769 : 4 : CU_ASSERT(rc == 1);
5770 : :
5771 : : /* array size is 0 */
5772 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
5773 : 4 : CU_ASSERT(rc == 1);
5774 : :
5775 : : /* get_supported_dma_device_types op is set */
5776 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5777 : 4 : CU_ASSERT(rc == 1);
5778 : 4 : CU_ASSERT(domains[0] == g_bdev_memory_domain);
5779 : :
5780 : : /* get_supported_dma_device_types op is not set */
5781 : 4 : fn_table.get_memory_domains = NULL;
5782 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5783 : 4 : CU_ASSERT(rc == 0);
5784 : 4 : }
5785 : :
5786 : : static void
5787 : 8 : _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
5788 : : {
5789 : : struct spdk_bdev *bdev;
5790 : 8 : struct spdk_bdev_desc *desc = NULL;
5791 : : struct spdk_io_channel *io_ch;
5792 : 8 : char io_buf[512];
5793 : 8 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5794 : : struct ut_expected_io *expected_io;
5795 : : int rc;
5796 : :
5797 : 8 : ut_init_bdev(NULL);
5798 : :
5799 : 8 : bdev = allocate_bdev("bdev0");
5800 : 8 : bdev->md_interleave = false;
5801 : 8 : bdev->md_len = 8;
5802 : :
5803 : 8 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5804 : 8 : CU_ASSERT(rc == 0);
5805 [ - + ]: 8 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5806 : 8 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5807 : 8 : io_ch = spdk_bdev_get_io_channel(desc);
5808 : 8 : CU_ASSERT(io_ch != NULL);
5809 : :
5810 : : /* read */
5811 : 8 : g_io_done = false;
5812 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
5813 [ + + ]: 8 : if (ext_io_opts) {
5814 : 4 : expected_io->md_buf = ext_io_opts->metadata;
5815 : : }
5816 : 8 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5817 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5818 : :
5819 : 8 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5820 : :
5821 : 8 : CU_ASSERT(rc == 0);
5822 [ - + ]: 8 : CU_ASSERT(g_io_done == false);
5823 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5824 : 8 : stub_complete_io(1);
5825 [ - + ]: 8 : CU_ASSERT(g_io_done == true);
5826 : :
5827 : : /* write */
5828 : 8 : g_io_done = false;
5829 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
5830 [ + + ]: 8 : if (ext_io_opts) {
5831 : 4 : expected_io->md_buf = ext_io_opts->metadata;
5832 : : }
5833 : 8 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5834 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5835 : :
5836 : 8 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5837 : :
5838 : 8 : CU_ASSERT(rc == 0);
5839 [ - + ]: 8 : CU_ASSERT(g_io_done == false);
5840 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5841 : 8 : stub_complete_io(1);
5842 [ - + ]: 8 : CU_ASSERT(g_io_done == true);
5843 : :
5844 : 8 : spdk_put_io_channel(io_ch);
5845 : 8 : spdk_bdev_close(desc);
5846 : 8 : free_bdev(bdev);
5847 : 8 : ut_fini_bdev();
5848 : :
5849 : 8 : }
5850 : :
5851 : : static void
5852 : 4 : bdev_io_ext(void)
5853 : : {
5854 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5855 : : .metadata = (void *)0xFF000000,
5856 : : .size = sizeof(ext_io_opts),
5857 : : .dif_check_flags_exclude_mask = 0
5858 : : };
5859 : :
5860 : 4 : _bdev_io_ext(&ext_io_opts);
5861 : 4 : }
5862 : :
5863 : : static void
5864 : 4 : bdev_io_ext_no_opts(void)
5865 : : {
5866 : 4 : _bdev_io_ext(NULL);
5867 : 4 : }
5868 : :
5869 : : static void
5870 : 4 : bdev_io_ext_invalid_opts(void)
5871 : : {
5872 : : struct spdk_bdev *bdev;
5873 : 4 : struct spdk_bdev_desc *desc = NULL;
5874 : : struct spdk_io_channel *io_ch;
5875 : 4 : char io_buf[512];
5876 : 4 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5877 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5878 : : .metadata = (void *)0xFF000000,
5879 : : .size = sizeof(ext_io_opts),
5880 : : .dif_check_flags_exclude_mask = 0
5881 : : };
5882 : : int rc;
5883 : :
5884 : 4 : ut_init_bdev(NULL);
5885 : :
5886 : 4 : bdev = allocate_bdev("bdev0");
5887 : 4 : bdev->md_interleave = false;
5888 : 4 : bdev->md_len = 8;
5889 : :
5890 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5891 : 4 : CU_ASSERT(rc == 0);
5892 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5893 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5894 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5895 : 4 : CU_ASSERT(io_ch != NULL);
5896 : :
5897 : : /* Test invalid ext_opts size */
5898 : 4 : ext_io_opts.size = 0;
5899 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5900 : 4 : CU_ASSERT(rc == -EINVAL);
5901 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5902 : 4 : CU_ASSERT(rc == -EINVAL);
5903 : :
5904 : 4 : ext_io_opts.size = sizeof(ext_io_opts) * 2;
5905 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5906 : 4 : CU_ASSERT(rc == -EINVAL);
5907 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5908 : 4 : CU_ASSERT(rc == -EINVAL);
5909 : :
5910 : 4 : ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
5911 : : sizeof(ext_io_opts.metadata) - 1;
5912 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5913 : 4 : CU_ASSERT(rc == -EINVAL);
5914 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5915 : 4 : CU_ASSERT(rc == -EINVAL);
5916 : :
5917 : 4 : spdk_put_io_channel(io_ch);
5918 : 4 : spdk_bdev_close(desc);
5919 : 4 : free_bdev(bdev);
5920 : 4 : ut_fini_bdev();
5921 : 4 : }
5922 : :
5923 : : static void
5924 : 4 : bdev_io_ext_split(void)
5925 : : {
5926 : : struct spdk_bdev *bdev;
5927 : 4 : struct spdk_bdev_desc *desc = NULL;
5928 : : struct spdk_io_channel *io_ch;
5929 : 4 : char io_buf[512];
5930 : 4 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5931 : : struct ut_expected_io *expected_io;
5932 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5933 : : .metadata = (void *)0xFF000000,
5934 : : .size = sizeof(ext_io_opts),
5935 : : .dif_check_flags_exclude_mask = 0
5936 : : };
5937 : : int rc;
5938 : :
5939 : 4 : ut_init_bdev(NULL);
5940 : :
5941 : 4 : bdev = allocate_bdev("bdev0");
5942 : 4 : bdev->md_interleave = false;
5943 : 4 : bdev->md_len = 8;
5944 : :
5945 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5946 : 4 : CU_ASSERT(rc == 0);
5947 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5948 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5949 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5950 : 4 : CU_ASSERT(io_ch != NULL);
5951 : :
5952 : : /* Check that IO request with ext_opts and metadata is split correctly
5953 : : * Offset 14, length 8, payload 0xF000
5954 : : * Child - Offset 14, length 2, payload 0xF000
5955 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5956 : : */
5957 : 4 : bdev->optimal_io_boundary = 16;
5958 : 4 : bdev->split_on_optimal_io_boundary = true;
5959 : 4 : bdev->md_interleave = false;
5960 : 4 : bdev->md_len = 8;
5961 : :
5962 : 4 : iov.iov_base = (void *)0xF000;
5963 : 4 : iov.iov_len = 4096;
5964 : 4 : memset(&ext_io_opts, 0, sizeof(ext_io_opts));
5965 : 4 : ext_io_opts.metadata = (void *)0xFF000000;
5966 : 4 : ext_io_opts.size = sizeof(ext_io_opts);
5967 : 4 : g_io_done = false;
5968 : :
5969 : : /* read */
5970 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
5971 : 4 : expected_io->md_buf = ext_io_opts.metadata;
5972 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
5973 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5974 : :
5975 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
5976 : 4 : expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
5977 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
5978 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5979 : :
5980 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
5981 : 4 : CU_ASSERT(rc == 0);
5982 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5983 : :
5984 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5985 : 4 : stub_complete_io(2);
5986 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5987 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5988 : :
5989 : : /* write */
5990 : 4 : g_io_done = false;
5991 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
5992 : 4 : expected_io->md_buf = ext_io_opts.metadata;
5993 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
5994 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5995 : :
5996 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
5997 : 4 : expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
5998 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
5999 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6000 : :
6001 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
6002 : 4 : CU_ASSERT(rc == 0);
6003 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
6004 : :
6005 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
6006 : 4 : stub_complete_io(2);
6007 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
6008 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6009 : :
6010 : 4 : spdk_put_io_channel(io_ch);
6011 : 4 : spdk_bdev_close(desc);
6012 : 4 : free_bdev(bdev);
6013 : 4 : ut_fini_bdev();
6014 : 4 : }
6015 : :
6016 : : static void
bdev_io_ext_bounce_buffer(void)
{
	/* Verify the ext IO API on a bdev that does not support memory domains:
	 * the bdev layer must fall back to bounce buffers from its pool, pull
	 * data from the caller's memory domain before writes, push it back
	 * after reads, and queue the request for retry when pull/push fails
	 * with -ENOMEM. */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io, *aux_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Verify data pull/push
	 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	/* On read completion, data must be pushed back to the memory domain */
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	/* For writes, data is pulled from the memory domain before submission */
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from pull */
	g_io_done = false;
	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Make the pull step fail with -ENOMEM while the first IO is in flight */
	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	/* The second IO has been queued */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	MOCK_CLEAR(spdk_memory_domain_pull_data);
	g_memory_domain_pull_data_called = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	/* Retry of the queued IO re-attempted the pull after the mock was cleared */
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	/* The second IO should be submitted now */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	g_io_done = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from push */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	MOCK_SET(spdk_memory_domain_push_data, -ENOMEM);
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	stub_complete_io(1);
	/* The IO isn't done yet, it's still waiting on push */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	MOCK_CLEAR(spdk_memory_domain_push_data);
	g_memory_domain_push_data_called = false;
	/* Completing the second IO should also trigger push on the first one */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6147 : :
static void
bdev_register_uuid_alias(void)
{
	/* Verify that registering a bdev also registers its UUID as a name
	 * alias, that the alias is dropped on unregister, and that two bdevs
	 * with the same UUID cannot both be registered. */
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev; the alias must disappear with it */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID string itself as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUIDs */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	ut_fini_bdev();
}
6213 : :
static void
bdev_unregister_by_name(void)
{
	/* Verify spdk_bdev_unregister_by_name(): a wrong name or a module that
	 * did not register the bdev fails with -ENODEV, and a successful call
	 * invokes the unregister callback asynchronously (after polling),
	 * without firing the open-descriptor event callback. */
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	/* Sentinel values so we can detect which callbacks ran */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Name does not match any registered bdev */
	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	/* Name matches, but the module is not the one that registered it */
	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}
6249 : :
/* spdk_for_each_bdev() callback: treats ctx as a pointer to an int counter
 * and bumps it once per visited bdev. Always returns 0 so iteration never
 * stops early. */
static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *total = ctx;

	*total += 1;

	return 0;
}
6259 : :
6260 : : static void
6261 : 4 : for_each_bdev_test(void)
6262 : : {
6263 : : struct spdk_bdev *bdev[8];
6264 : 4 : int rc, count;
6265 : :
6266 : 4 : bdev[0] = allocate_bdev("bdev0");
6267 : 4 : bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;
6268 : :
6269 : 4 : bdev[1] = allocate_bdev("bdev1");
6270 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
6271 : 4 : CU_ASSERT(rc == 0);
6272 : :
6273 : 4 : bdev[2] = allocate_bdev("bdev2");
6274 : :
6275 : 4 : bdev[3] = allocate_bdev("bdev3");
6276 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
6277 : 4 : CU_ASSERT(rc == 0);
6278 : :
6279 : 4 : bdev[4] = allocate_bdev("bdev4");
6280 : :
6281 : 4 : bdev[5] = allocate_bdev("bdev5");
6282 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
6283 : 4 : CU_ASSERT(rc == 0);
6284 : :
6285 : 4 : bdev[6] = allocate_bdev("bdev6");
6286 : :
6287 : 4 : bdev[7] = allocate_bdev("bdev7");
6288 : :
6289 : 4 : count = 0;
6290 : 4 : rc = spdk_for_each_bdev(&count, count_bdevs);
6291 : 4 : CU_ASSERT(rc == 0);
6292 : 4 : CU_ASSERT(count == 7);
6293 : :
6294 : 4 : count = 0;
6295 : 4 : rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
6296 : 4 : CU_ASSERT(rc == 0);
6297 : 4 : CU_ASSERT(count == 4);
6298 : :
6299 : 4 : bdev[0]->internal.status = SPDK_BDEV_STATUS_READY;
6300 : 4 : free_bdev(bdev[0]);
6301 : 4 : free_bdev(bdev[1]);
6302 : 4 : free_bdev(bdev[2]);
6303 : 4 : free_bdev(bdev[3]);
6304 : 4 : free_bdev(bdev[4]);
6305 : 4 : free_bdev(bdev[5]);
6306 : 4 : free_bdev(bdev[6]);
6307 : 4 : free_bdev(bdev[7]);
6308 : 4 : }
6309 : :
static void
bdev_seek_test(void)
{
	/* Verify spdk_bdev_seek_data()/spdk_bdev_seek_hole(). When the bdev
	 * does not support the IO type, the call completes without reaching
	 * the backend and reports offset 0 (seek_data) or UINT64_MAX
	 * (seek_hole). When supported, the backend-reported offset is
	 * propagated to the completion callback. */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	int rc;

	ut_init_bdev(NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Seek data not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false);
	rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	/* No IO reached the backend; completion arrives after polling */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_seek_offset == 0);

	/* Seek hole not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false);
	rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_seek_offset == UINT64_MAX);

	/* Seek data supported */
	g_seek_data_offset = 12345;
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true);
	rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_seek_offset == 12345);

	/* Seek hole supported */
	g_seek_hole_offset = 67890;
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true);
	rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_seek_offset == 67890);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6372 : :
static void
bdev_copy(void)
{
	/* Verify spdk_bdev_copy_blocks(): with native COPY support a single
	 * COPY IO is submitted unsplit; without it, the copy is emulated as a
	 * READ followed by a WRITE. */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t src_offset, num_blocks;
	uint32_t num_completed;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports copy, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 512;
	num_blocks = 128;
	/* Copy from the tail of the bdev to the start, so ranges never overlap */
	src_offset = bdev->blockcnt - num_blocks;

	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if copy is not supported it'll still work */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	/* First completion is the READ; the WRITE is then submitted and completed */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Restore COPY support for subsequent tests */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6432 : :
static void
bdev_copy_split_test(void)
{
	/* Verify splitting of spdk_bdev_copy_blocks() requests against
	 * bdev->max_copy, including the SPDK_BDEV_MAX_CHILDREN_COPY_REQS cap
	 * on concurrent children and the READ+WRITE fallback when COPY is not
	 * supported. */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	/* Enlarge the IO pools so many child IOs can be in flight at once */
	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	/* Each child copies one max_copy_blocks-sized slice at increasing offsets */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	/* At most SPDK_BDEV_MAX_CHILDREN_COPY_REQS children are outstanding at once;
	 * completing a batch lets the next batch be submitted */
	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/* Case 4: Same test scenario as the case 2 but the configuration is different.
	 * Copy is not supported.
	 */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	/* Each child becomes a READ of the source slice... */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	/* ...followed by a WRITE of the destination slice */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	/* Reset the offsets consumed while building the expected IO lists */
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* One copy request is split into one read and one write requests. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6584 : :
6585 : : static void
6586 : 4 : examine_claim_v1(struct spdk_bdev *bdev)
6587 : : {
6588 : : int rc;
6589 : :
6590 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
6591 : 4 : CU_ASSERT(rc == 0);
6592 : 4 : }
6593 : :
6594 : : static void
6595 : 16 : examine_no_lock_held(struct spdk_bdev *bdev)
6596 : : {
6597 : 16 : CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
6598 : 16 : CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
6599 : 16 : }
6600 : :
/* Context for examine_claim_v2(): embeds the generic examine hook counters
 * and records which v2 claim type to request plus the descriptor that holds
 * the resulting claim. */
struct examine_claim_v2_ctx {
	struct ut_examine_ctx examine_ctx;	/* examine_config/examine_disk hooks and call counters */
	enum spdk_bdev_claim_type claim_type;	/* v2 claim type examine_claim_v2() should take */
	struct spdk_bdev_desc *desc;		/* descriptor opened during examine; closed by the test */
};
6606 : :
6607 : : static void
6608 : 4 : examine_claim_v2(struct spdk_bdev *bdev)
6609 : : {
6610 : 4 : struct examine_claim_v2_ctx *ctx = bdev->ctxt;
6611 : : int rc;
6612 : :
6613 : 4 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
6614 : 4 : CU_ASSERT(rc == 0);
6615 : :
6616 : 4 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
6617 : 4 : CU_ASSERT(rc == 0);
6618 : 4 : }
6619 : :
static void
examine_locks(void)
{
	/* Verify that no spinlocks are held while examine_config()/
	 * examine_disk() callbacks run, across all three examine code paths:
	 * no claim, a v1 claim taken during examine, and a v2 claim taken
	 * during examine. */
	struct spdk_bdev *bdev;
	struct ut_examine_ctx ctx = { 0 };
	struct examine_claim_v2_ctx v2_ctx;

	/* Without any claims, one code path is taken */
	ctx.examine_config = examine_no_lock_held;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise another path that is taken when examine_config() takes a v1 claim. */
	memset(&ctx, 0, sizeof(ctx));
	ctx.examine_config = examine_claim_v1;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
	spdk_bdev_module_release_bdev(bdev);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise the final path that comes with v2 claims. */
	memset(&v2_ctx, 0, sizeof(v2_ctx));
	v2_ctx.examine_ctx.examine_config = examine_claim_v2;
	v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
	v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
	CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
	CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	/* Closing the descriptor opened by examine_claim_v2() drops the claim */
	spdk_bdev_close(v2_ctx.desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	free_bdev(bdev);
}
6664 : :
/*
 * Assert that the v2 claim list of bdev holds exactly expect entries, by
 * walking bdev->internal.claim.v2.claims and comparing the length with
 * CU_ASSERT. Wrapped in do { } while (0) so it behaves as a single
 * statement. Arguments are parenthesized in the expansion so callers may
 * pass arbitrary expressions (the original left `bdev` and `expect`
 * unparenthesized, which would misparse e.g. a conditional expression).
 */
#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &(bdev)->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == (expect)); \
	} while (0)
6674 : :
static void
claim_v2_rwo(void)
{
	/* Verify the v2 READ_MANY_WRITE_ONE (RWO) claim: exactly one writer
	 * holds the claim, additional readers may still open the bdev, and
	 * while the claim is held every other claim type (v2 RWO/ROM/RWM and
	 * v1) as well as new writable opens are rejected. Also verifies that
	 * a claim with a shared key is invalid for RWO. */
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	/* With no options, the claim name defaults to the empty string */
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	/* The claim keeps its own copy of the name: clobbering opts is safe */
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Closing the first descriptor now allows a new claim and it is promoted to rw. */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->claim != NULL);
	/* The read-only descriptor became writable when it took the RWO claim */
	CU_ASSERT(desc2->write);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}
6801 : :
6802 : : static void
6803 : 4 : claim_v2_rom(void)
6804 : : {
6805 : : struct spdk_bdev *bdev;
6806 : 4 : struct spdk_bdev_desc *desc;
6807 : 4 : struct spdk_bdev_desc *desc2;
6808 : 4 : struct spdk_bdev_claim_opts opts;
6809 : : int rc;
6810 : :
6811 : 4 : bdev = allocate_bdev("bdev0");
6812 : :
6813 : : /* Claim without options */
6814 : 4 : desc = NULL;
6815 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6816 : 4 : CU_ASSERT(rc == 0);
6817 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6818 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6819 : : &bdev_ut_if);
6820 : 4 : CU_ASSERT(rc == 0);
6821 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6822 : 4 : CU_ASSERT(desc->claim != NULL);
6823 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6824 : 4 : CU_ASSERT(strcmp(desc->claim->name, "") == 0);
6825 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6826 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6827 : :
6828 : : /* Release the claim by closing the descriptor */
6829 : 4 : spdk_bdev_close(desc);
6830 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6831 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6832 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6833 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6834 : :
6835 : : /* Claim with options */
6836 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6837 : 4 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6838 : 4 : desc = NULL;
6839 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6840 : 4 : CU_ASSERT(rc == 0);
6841 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6842 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
6843 : : &bdev_ut_if);
6844 : 4 : CU_ASSERT(rc == 0);
6845 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6846 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
6847 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6848 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6849 : 4 : memset(&opts, 0, sizeof(opts));
6850 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6851 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6852 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6853 : :
6854 : : /* The claim blocks new writers. */
6855 : 4 : desc2 = NULL;
6856 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6857 : 4 : CU_ASSERT(rc == -EPERM);
6858 : 4 : CU_ASSERT(desc2 == NULL);
6859 : :
6860 : : /* New readers are allowed */
6861 : 4 : desc2 = NULL;
6862 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6863 : 4 : CU_ASSERT(rc == 0);
6864 : 4 : CU_ASSERT(desc2 != NULL);
6865 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6866 : :
6867 : : /* No new v2 RWO claims are allowed */
6868 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6869 : : &bdev_ut_if);
6870 : 4 : CU_ASSERT(rc == -EPERM);
6871 : :
6872 : : /* No new v2 RWM claims are allowed */
6873 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6874 : 4 : opts.shared_claim_key = (uint64_t)&opts;
6875 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6876 : : &bdev_ut_if);
6877 : 4 : CU_ASSERT(rc == -EPERM);
6878 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6879 : :
6880 : : /* No new v1 claims are allowed */
6881 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
6882 : 4 : CU_ASSERT(rc == -EPERM);
6883 : :
6884 : : /* None of the above messed up the existing claim */
6885 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6886 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6887 : :
6888 : : /* New v2 ROM claims are allowed and the descriptor stays read-only. */
6889 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6890 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6891 : : &bdev_ut_if);
6892 : 4 : CU_ASSERT(rc == 0);
6893 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6894 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6895 : 4 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
6896 [ + + ]: 12 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
6897 : :
6898 : : /* Claim remains when closing the first descriptor */
6899 : 4 : spdk_bdev_close(desc);
6900 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6901 : 4 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
6902 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
6903 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6904 : :
6905 : : /* Claim removed when closing the other descriptor */
6906 : 4 : spdk_bdev_close(desc2);
6907 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6908 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6909 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6910 : :
6911 : : /* Cannot claim with a key */
6912 : 4 : desc = NULL;
6913 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6914 : 4 : CU_ASSERT(rc == 0);
6915 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6916 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6917 : 4 : opts.shared_claim_key = (uint64_t)&opts;
6918 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
6919 : : &bdev_ut_if);
6920 : 4 : CU_ASSERT(rc == -EINVAL);
6921 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6922 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6923 : 4 : spdk_bdev_close(desc);
6924 : :
6925 : : /* Cannot claim with a read-write descriptor */
6926 : 4 : desc = NULL;
6927 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6928 : 4 : CU_ASSERT(rc == 0);
6929 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6930 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6931 : : &bdev_ut_if);
6932 : 4 : CU_ASSERT(rc == -EINVAL);
6933 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6934 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6935 : 4 : spdk_bdev_close(desc);
6936 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6937 : :
6938 : : /* Clean up */
6939 : 4 : free_bdev(bdev);
6940 : 4 : }
6941 : :
6942 : : static void
6943 : 4 : claim_v2_rwm(void)
6944 : : {
6945 : : struct spdk_bdev *bdev;
6946 : 4 : struct spdk_bdev_desc *desc;
6947 : 4 : struct spdk_bdev_desc *desc2;
6948 : 4 : struct spdk_bdev_claim_opts opts;
6949 : 4 : char good_key, bad_key;
6950 : : int rc;
6951 : :
6952 : 4 : bdev = allocate_bdev("bdev0");
6953 : :
6954 : : /* Claim without options should fail */
6955 : 4 : desc = NULL;
6956 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6957 : 4 : CU_ASSERT(rc == 0);
6958 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6959 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
6960 : : &bdev_ut_if);
6961 : 4 : CU_ASSERT(rc == -EINVAL);
6962 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6963 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6964 : 4 : CU_ASSERT(desc->claim == NULL);
6965 : :
6966 : : /* Claim with options */
6967 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6968 : 4 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6969 : 4 : opts.shared_claim_key = (uint64_t)&good_key;
6970 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6971 : : &bdev_ut_if);
6972 : 4 : CU_ASSERT(rc == 0);
6973 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
6974 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
6975 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6976 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6977 : 4 : memset(&opts, 0, sizeof(opts));
6978 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6979 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6980 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6981 : :
6982 : : /* The claim blocks new writers. */
6983 : 4 : desc2 = NULL;
6984 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6985 : 4 : CU_ASSERT(rc == -EPERM);
6986 : 4 : CU_ASSERT(desc2 == NULL);
6987 : :
6988 : : /* New readers are allowed */
6989 : 4 : desc2 = NULL;
6990 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6991 : 4 : CU_ASSERT(rc == 0);
6992 : 4 : CU_ASSERT(desc2 != NULL);
6993 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6994 : :
6995 : : /* No new v2 RWO claims are allowed */
6996 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6997 : : &bdev_ut_if);
6998 : 4 : CU_ASSERT(rc == -EPERM);
6999 : :
7000 : : /* No new v2 ROM claims are allowed and the descriptor stays read-only. */
7001 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7002 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
7003 : : &bdev_ut_if);
7004 : 4 : CU_ASSERT(rc == -EPERM);
7005 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7006 : :
7007 : : /* No new v1 claims are allowed */
7008 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7009 : 4 : CU_ASSERT(rc == -EPERM);
7010 : :
7011 : : /* No new v2 RWM claims are allowed if the key does not match */
7012 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7013 : 4 : opts.shared_claim_key = (uint64_t)&bad_key;
7014 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7015 : : &bdev_ut_if);
7016 : 4 : CU_ASSERT(rc == -EPERM);
7017 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7018 : :
7019 : : /* None of the above messed up the existing claim */
7020 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7021 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7022 : :
7023 : : /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
7024 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7025 : 4 : opts.shared_claim_key = (uint64_t)&good_key;
7026 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7027 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7028 : : &bdev_ut_if);
7029 : 4 : CU_ASSERT(rc == 0);
7030 [ - + ]: 4 : CU_ASSERT(desc2->write);
7031 : 4 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
7032 [ + + ]: 12 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
7033 : :
7034 : : /* Claim remains when closing the first descriptor */
7035 : 4 : spdk_bdev_close(desc);
7036 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
7037 : 4 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
7038 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
7039 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7040 : :
7041 : : /* Claim removed when closing the other descriptor */
7042 : 4 : spdk_bdev_close(desc2);
7043 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7044 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7045 : :
7046 : : /* Cannot claim without a key */
7047 : 4 : desc = NULL;
7048 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7049 : 4 : CU_ASSERT(rc == 0);
7050 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7051 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7052 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7053 : : &bdev_ut_if);
7054 : 4 : CU_ASSERT(rc == -EINVAL);
7055 : 4 : spdk_bdev_close(desc);
7056 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7057 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7058 : :
7059 : : /* Clean up */
7060 : 4 : free_bdev(bdev);
7061 : 4 : }
7062 : :
7063 : : static void
7064 : 4 : claim_v2_existing_writer(void)
7065 : : {
7066 : : struct spdk_bdev *bdev;
7067 : 4 : struct spdk_bdev_desc *desc;
7068 : 4 : struct spdk_bdev_desc *desc2;
7069 : 4 : struct spdk_bdev_claim_opts opts;
7070 : : enum spdk_bdev_claim_type type;
7071 : 4 : enum spdk_bdev_claim_type types[] = {
7072 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7073 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7074 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7075 : : };
7076 : : size_t i;
7077 : : int rc;
7078 : :
7079 : 4 : bdev = allocate_bdev("bdev0");
7080 : :
7081 : 4 : desc = NULL;
7082 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7083 : 4 : CU_ASSERT(rc == 0);
7084 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7085 : 4 : desc2 = NULL;
7086 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
7087 : 4 : CU_ASSERT(rc == 0);
7088 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc2 != NULL);
7089 : :
7090 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7091 : 12 : type = types[i];
7092 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7093 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7094 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7095 : : }
7096 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7097 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7098 : 4 : CU_ASSERT(rc == -EINVAL);
7099 : : } else {
7100 : 8 : CU_ASSERT(rc == -EPERM);
7101 : : }
7102 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7103 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
7104 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7105 : 4 : CU_ASSERT(rc == -EINVAL);
7106 : : } else {
7107 : 8 : CU_ASSERT(rc == -EPERM);
7108 : : }
7109 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7110 : : }
7111 : :
7112 : 4 : spdk_bdev_close(desc);
7113 : 4 : spdk_bdev_close(desc2);
7114 : :
7115 : : /* Clean up */
7116 : 4 : free_bdev(bdev);
7117 : 4 : }
7118 : :
7119 : : static void
7120 : 4 : claim_v2_existing_v1(void)
7121 : : {
7122 : : struct spdk_bdev *bdev;
7123 : 4 : struct spdk_bdev_desc *desc;
7124 : 4 : struct spdk_bdev_claim_opts opts;
7125 : : enum spdk_bdev_claim_type type;
7126 : 4 : enum spdk_bdev_claim_type types[] = {
7127 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7128 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7129 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7130 : : };
7131 : : size_t i;
7132 : : int rc;
7133 : :
7134 : 4 : bdev = allocate_bdev("bdev0");
7135 : :
7136 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7137 : 4 : CU_ASSERT(rc == 0);
7138 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7139 : :
7140 : 4 : desc = NULL;
7141 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7142 : 4 : CU_ASSERT(rc == 0);
7143 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7144 : :
7145 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7146 : 12 : type = types[i];
7147 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7148 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7149 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7150 : : }
7151 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7152 : 12 : CU_ASSERT(rc == -EPERM);
7153 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7154 : : }
7155 : :
7156 : 4 : spdk_bdev_module_release_bdev(bdev);
7157 : 4 : spdk_bdev_close(desc);
7158 : :
7159 : : /* Clean up */
7160 : 4 : free_bdev(bdev);
7161 : 4 : }
7162 : :
7163 : : static void
7164 : 4 : claim_v1_existing_v2(void)
7165 : : {
7166 : : struct spdk_bdev *bdev;
7167 : 4 : struct spdk_bdev_desc *desc;
7168 : 4 : struct spdk_bdev_claim_opts opts;
7169 : : enum spdk_bdev_claim_type type;
7170 : 4 : enum spdk_bdev_claim_type types[] = {
7171 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7172 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7173 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7174 : : };
7175 : : size_t i;
7176 : : int rc;
7177 : :
7178 : 4 : bdev = allocate_bdev("bdev0");
7179 : :
7180 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7181 : 12 : type = types[i];
7182 : :
7183 : 12 : desc = NULL;
7184 : 12 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7185 : 12 : CU_ASSERT(rc == 0);
7186 [ - + ]: 12 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7187 : :
7188 : : /* Get a v2 claim */
7189 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7190 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7191 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7192 : : }
7193 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7194 : 12 : CU_ASSERT(rc == 0);
7195 : :
7196 : : /* Fail to get a v1 claim */
7197 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7198 : 12 : CU_ASSERT(rc == -EPERM);
7199 : :
7200 : 12 : spdk_bdev_close(desc);
7201 : :
7202 : : /* Now v1 succeeds */
7203 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7204 : 12 : CU_ASSERT(rc == 0)
7205 : 12 : spdk_bdev_module_release_bdev(bdev);
7206 : : }
7207 : :
7208 : : /* Clean up */
7209 : 4 : free_bdev(bdev);
7210 : 4 : }
7211 : :
7212 : : static int ut_examine_claimed_init0(void);
7213 : : static int ut_examine_claimed_init1(void);
7214 : : static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
7215 : : static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
7216 : : static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
7217 : : static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);
7218 : :
7219 : : #define UT_MAX_EXAMINE_MODS 2
7220 : : struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
7221 : : {
7222 : : .name = "vbdev_ut_examine0",
7223 : : .module_init = ut_examine_claimed_init0,
7224 : : .module_fini = vbdev_ut_module_fini,
7225 : : .examine_config = ut_examine_claimed_config0,
7226 : : .examine_disk = ut_examine_claimed_disk0,
7227 : : },
7228 : : {
7229 : : .name = "vbdev_ut_examine1",
7230 : : .module_init = ut_examine_claimed_init1,
7231 : : .module_fini = vbdev_ut_module_fini,
7232 : : .examine_config = ut_examine_claimed_config1,
7233 : : .examine_disk = ut_examine_claimed_disk1,
7234 : : }
7235 : : };
7236 : :
7237 : 4 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
7238 : 4 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])
7239 : :
7240 : : struct ut_examine_claimed_ctx {
7241 : : uint32_t examine_config_count;
7242 : : uint32_t examine_disk_count;
7243 : :
7244 : : /* Claim type to take, with these options */
7245 : : enum spdk_bdev_claim_type claim_type;
7246 : : struct spdk_bdev_claim_opts claim_opts;
7247 : :
7248 : : /* Expected return value from spdk_bdev_module_claim_bdev_desc() */
7249 : : int expect_claim_err;
7250 : :
7251 : : /* Descriptor used for a claim */
7252 : : struct spdk_bdev_desc *desc;
7253 : : } examine_claimed_ctx[UT_MAX_EXAMINE_MODS];
7254 : :
7255 : : bool ut_testing_examine_claimed;
7256 : :
7257 : : /*
7258 : : * Store the order in which the modules were initialized,
7259 : : * since we have no guarantee on the order of execution of the constructors.
7260 : : * Modules are examined in reverse order of their initialization.
7261 : : */
7262 : : static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];
7263 : : static int
7264 : 320 : ut_examine_claimed_init(uint32_t modnum)
7265 : : {
7266 : : static int current = UT_MAX_EXAMINE_MODS;
7267 : :
7268 : :                 /* Only do this for the first initialization of the bdev framework */
7269 [ + + ]: 320 : if (current == 0) {
7270 : 312 : return 0;
7271 : : }
7272 : 8 : g_ut_examine_claimed_order[modnum] = --current;
7273 : :
7274 : 8 : return 0;
7275 : : }
7276 : :
7277 : : static int
7278 : 160 : ut_examine_claimed_init0(void)
7279 : : {
7280 : 160 : return ut_examine_claimed_init(0);
7281 : : }
7282 : :
7283 : : static int
7284 : 160 : ut_examine_claimed_init1(void)
7285 : : {
7286 : 160 : return ut_examine_claimed_init(1);
7287 : : }
7288 : :
7289 : : static void
7290 : 16 : reset_examine_claimed_ctx(void)
7291 : : {
7292 : : struct ut_examine_claimed_ctx *ctx;
7293 : : uint32_t i;
7294 : :
7295 [ + + ]: 48 : for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
7296 : 32 : ctx = &examine_claimed_ctx[i];
7297 [ + + ]: 32 : if (ctx->desc != NULL) {
7298 : 20 : spdk_bdev_close(ctx->desc);
7299 : : }
7300 [ - + ]: 32 : memset(ctx, 0, sizeof(*ctx));
7301 : 32 : spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
7302 : : }
7303 : 16 : }
7304 : :
7305 : : static void
7306 : 640 : examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
7307 : : {
7308 [ - + ]: 640 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7309 : 640 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7310 : 640 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7311 : : int rc;
7312 : :
7313 [ + + + + ]: 640 : if (!ut_testing_examine_claimed) {
7314 : 616 : spdk_bdev_module_examine_done(module);
7315 : 616 : return;
7316 : : }
7317 : :
7318 : 24 : ctx->examine_config_count++;
7319 : :
7320 [ + + ]: 24 : if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
7321 : 20 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
7322 : : &ctx->desc);
7323 : 20 : CU_ASSERT(rc == 0);
7324 : :
7325 : 20 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
7326 : 20 : CU_ASSERT(rc == ctx->expect_claim_err);
7327 : : }
7328 : 24 : spdk_bdev_module_examine_done(module);
7329 : : }
7330 : :
7331 : : static void
7332 : 320 : ut_examine_claimed_config0(struct spdk_bdev *bdev)
7333 : : {
7334 : 320 : examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
7335 : 320 : }
7336 : :
7337 : : static void
7338 : 320 : ut_examine_claimed_config1(struct spdk_bdev *bdev)
7339 : : {
7340 : 320 : examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
7341 : 320 : }
7342 : :
7343 : : static void
7344 : 616 : examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
7345 : : {
7346 [ - + ]: 616 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7347 : 616 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7348 : 616 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7349 : :
7350 [ + + + + ]: 616 : if (!ut_testing_examine_claimed) {
7351 : 600 : spdk_bdev_module_examine_done(module);
7352 : 600 : return;
7353 : : }
7354 : :
7355 : 16 : ctx->examine_disk_count++;
7356 : :
7357 : 16 : spdk_bdev_module_examine_done(module);
7358 : : }
7359 : :
7360 : : static void
7361 : 308 : ut_examine_claimed_disk0(struct spdk_bdev *bdev)
7362 : : {
7363 : 308 : examine_claimed_disk(bdev, 0);
7364 : 308 : }
7365 : :
7366 : : static void
7367 : 308 : ut_examine_claimed_disk1(struct spdk_bdev *bdev)
7368 : : {
7369 : 308 : examine_claimed_disk(bdev, 1);
7370 : 308 : }
7371 : :
7372 : : static void
7373 : 4 : examine_claimed(void)
7374 : : {
7375 : : struct spdk_bdev *bdev;
7376 : 4 : struct spdk_bdev_module *mod = examine_claimed_mods;
7377 : 4 : struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;
7378 : :
7379 : 4 : ut_testing_examine_claimed = true;
7380 : 4 : reset_examine_claimed_ctx();
7381 : :
7382 : : /*
7383 : : * With one module claiming, both modules' examine_config should be called, but only the
7384 : : * claiming module's examine_disk should be called.
7385 : : */
7386 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7387 : 4 : bdev = allocate_bdev("bdev0");
7388 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7389 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7390 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7391 : 4 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7392 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7393 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 0);
7394 : 4 : CU_ASSERT(ctx[1].desc == NULL);
7395 : 4 : reset_examine_claimed_ctx();
7396 : 4 : free_bdev(bdev);
7397 : :
7398 : : /*
7399 : : * With two modules claiming, both modules' examine_config and examine_disk should be
7400 : : * called.
7401 : : */
7402 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7403 : 4 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7404 : 4 : bdev = allocate_bdev("bdev0");
7405 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7406 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7407 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7408 : 4 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7409 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7410 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7411 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7412 : 4 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7413 : 4 : reset_examine_claimed_ctx();
7414 : 4 : free_bdev(bdev);
7415 : :
7416 : : /*
7417 : : * If two vbdev modules try to claim with conflicting claim types, the module that was added
7418 : : * last wins. The winner gets the claim and is the only one that has its examine_disk
7419 : : * callback invoked.
7420 : : */
7421 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7422 : 4 : ctx[0].expect_claim_err = -EPERM;
7423 : 4 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
7424 : 4 : bdev = allocate_bdev("bdev0");
7425 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7426 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 0);
7427 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7428 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7429 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7430 : 4 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7431 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
7432 : 4 : reset_examine_claimed_ctx();
7433 : 4 : free_bdev(bdev);
7434 : :
7435 : 4 : ut_testing_examine_claimed = false;
7436 : 4 : }
7437 : :
7438 : : int
7439 : 4 : main(int argc, char **argv)
7440 : : {
7441 : 4 : CU_pSuite suite = NULL;
7442 : : unsigned int num_failures;
7443 : :
7444 : 4 : CU_initialize_registry();
7445 : :
7446 : 4 : suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);
7447 : :
7448 : 4 : CU_ADD_TEST(suite, bytes_to_blocks_test);
7449 : 4 : CU_ADD_TEST(suite, num_blocks_test);
7450 : 4 : CU_ADD_TEST(suite, io_valid_test);
7451 : 4 : CU_ADD_TEST(suite, open_write_test);
7452 : 4 : CU_ADD_TEST(suite, claim_test);
7453 : 4 : CU_ADD_TEST(suite, alias_add_del_test);
7454 : 4 : CU_ADD_TEST(suite, get_device_stat_test);
7455 : 4 : CU_ADD_TEST(suite, bdev_io_types_test);
7456 : 4 : CU_ADD_TEST(suite, bdev_io_wait_test);
7457 : 4 : CU_ADD_TEST(suite, bdev_io_spans_split_test);
7458 : 4 : CU_ADD_TEST(suite, bdev_io_boundary_split_test);
7459 : 4 : CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
7460 : 4 : CU_ADD_TEST(suite, bdev_io_mix_split_test);
7461 : 4 : CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
7462 : 4 : CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
7463 : 4 : CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
7464 : 4 : CU_ADD_TEST(suite, bdev_io_alignment);
7465 : 4 : CU_ADD_TEST(suite, bdev_histograms);
7466 : 4 : CU_ADD_TEST(suite, bdev_write_zeroes);
7467 : 4 : CU_ADD_TEST(suite, bdev_compare_and_write);
7468 : 4 : CU_ADD_TEST(suite, bdev_compare);
7469 : 4 : CU_ADD_TEST(suite, bdev_compare_emulated);
7470 : 4 : CU_ADD_TEST(suite, bdev_zcopy_write);
7471 : 4 : CU_ADD_TEST(suite, bdev_zcopy_read);
7472 : 4 : CU_ADD_TEST(suite, bdev_open_while_hotremove);
7473 : 4 : CU_ADD_TEST(suite, bdev_close_while_hotremove);
7474 : 4 : CU_ADD_TEST(suite, bdev_open_ext_test);
7475 : 4 : CU_ADD_TEST(suite, bdev_open_ext_unregister);
7476 : 4 : CU_ADD_TEST(suite, bdev_set_io_timeout);
7477 : 4 : CU_ADD_TEST(suite, bdev_set_qd_sampling);
7478 : 4 : CU_ADD_TEST(suite, lba_range_overlap);
7479 : 4 : CU_ADD_TEST(suite, lock_lba_range_check_ranges);
7480 : 4 : CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
7481 : 4 : CU_ADD_TEST(suite, lock_lba_range_overlapped);
7482 : 4 : CU_ADD_TEST(suite, bdev_quiesce);
7483 : 4 : CU_ADD_TEST(suite, bdev_io_abort);
7484 : 4 : CU_ADD_TEST(suite, bdev_unmap);
7485 : 4 : CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
7486 : 4 : CU_ADD_TEST(suite, bdev_set_options_test);
7487 : 4 : CU_ADD_TEST(suite, bdev_get_memory_domains);
7488 : 4 : CU_ADD_TEST(suite, bdev_io_ext);
7489 : 4 : CU_ADD_TEST(suite, bdev_io_ext_no_opts);
7490 : 4 : CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
7491 : 4 : CU_ADD_TEST(suite, bdev_io_ext_split);
7492 : 4 : CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
7493 : 4 : CU_ADD_TEST(suite, bdev_register_uuid_alias);
7494 : 4 : CU_ADD_TEST(suite, bdev_unregister_by_name);
7495 : 4 : CU_ADD_TEST(suite, for_each_bdev_test);
7496 : 4 : CU_ADD_TEST(suite, bdev_seek_test);
7497 : 4 : CU_ADD_TEST(suite, bdev_copy);
7498 : 4 : CU_ADD_TEST(suite, bdev_copy_split_test);
7499 : 4 : CU_ADD_TEST(suite, examine_locks);
7500 : 4 : CU_ADD_TEST(suite, claim_v2_rwo);
7501 : 4 : CU_ADD_TEST(suite, claim_v2_rom);
7502 : 4 : CU_ADD_TEST(suite, claim_v2_rwm);
7503 : 4 : CU_ADD_TEST(suite, claim_v2_existing_writer);
7504 : 4 : CU_ADD_TEST(suite, claim_v2_existing_v1);
7505 : 4 : CU_ADD_TEST(suite, claim_v1_existing_v2);
7506 : 4 : CU_ADD_TEST(suite, examine_claimed);
7507 : :
7508 : 4 : allocate_cores(1);
7509 : 4 : allocate_threads(1);
7510 : 4 : set_thread(0);
7511 : :
7512 : 4 : num_failures = spdk_ut_run_tests(argc, argv, NULL);
7513 : 4 : CU_cleanup_registry();
7514 : :
7515 : 4 : free_threads();
7516 : 4 : free_cores();
7517 : :
7518 : 4 : return num_failures;
7519 : : }
|