Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2015 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 : * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
7 : */
8 :
9 : #include "nvme_internal.h"
10 :
11 : static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
12 : struct spdk_nvme_qpair *qpair,
13 : const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
14 : uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
15 : void *cb_arg, uint32_t opc, uint32_t io_flags,
16 : uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
17 : void *accel_sequence, int *rc);
18 :
19 : static bool
20 1 : nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
21 : uint32_t sectors_per_stripe, uint32_t qdepth)
22 : {
23 1 : uint32_t child_per_io = UINT32_MAX;
24 :
25 : /* After a namespace is destroyed (e.g. by hotplug), all the fields associated with the
26 : * namespace are cleared to zero. In that case this function returns true,
27 : * and -EINVAL is returned to the caller.
28 : */
29 1 : if (sectors_per_stripe > 0) {
30 0 : child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
31 1 : } else if (sectors_per_max_io > 0) {
32 1 : child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
33 : }
34 :
35 1 : SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);
36 :
37 1 : return child_per_io >= qdepth;
38 : }
39 :
40 : static inline int
41 2 : nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
42 : uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
43 : {
44 2 : assert(rc);
45 3 : if (rc == -ENOMEM &&
46 1 : nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
47 1 : return -EINVAL;
48 : }
49 1 : return rc;
50 : }
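/*
 * Worked example of the check above (illustrative values, not from any particular
 * controller): with sectors_per_max_io = 256, no stripe, and qdepth
 * (io_queue_requests) = 128, an I/O of lba_count = 40000 would need
 * ceil(40000 / 256) = 157 child requests.  Since 157 >= 128, a transient -ENOMEM
 * from request allocation is reported to the caller as a permanent -EINVAL,
 * because the I/O can never fit in the queue.
 */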
51 :
52 : static inline bool
53 220 : _nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
54 : {
55 255 : return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
56 35 : (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
57 261 : (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
58 6 : (ns->md_size == 8);
59 : }
60 :
61 : static inline uint32_t
62 120 : _nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
63 : {
64 120 : return _nvme_md_excluded_from_xfer(ns, io_flags) ?
65 120 : ns->sector_size : ns->extended_lba_size;
66 : }
67 :
68 : static inline uint32_t
69 100 : _nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
70 : {
71 100 : return _nvme_md_excluded_from_xfer(ns, io_flags) ?
72 100 : ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
73 : }
74 :
75 : static struct nvme_request *
76 56 : _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
77 : const struct nvme_payload *payload,
78 : uint32_t payload_offset, uint32_t md_offset,
79 : uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
80 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
81 : struct nvme_request *parent, bool check_sgl, int *rc)
82 : {
83 : struct nvme_request *child;
84 :
85 56 : child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
86 : cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
87 56 : if (child == NULL) {
88 1 : nvme_request_free_children(parent);
89 1 : nvme_free_request(parent);
90 1 : return NULL;
91 : }
92 :
93 55 : nvme_request_add_child(parent, child);
94 55 : return child;
95 : }
96 :
97 : static struct nvme_request *
98 14 : _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
99 : struct spdk_nvme_qpair *qpair,
100 : const struct nvme_payload *payload,
101 : uint32_t payload_offset, uint32_t md_offset,
102 : uint64_t lba, uint32_t lba_count,
103 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
104 : uint32_t io_flags, struct nvme_request *req,
105 : uint32_t sectors_per_max_io, uint32_t sector_mask,
106 : uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
107 : void *accel_sequence, int *rc)
108 : {
109 14 : uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
110 14 : uint32_t remaining_lba_count = lba_count;
111 : struct nvme_request *child;
112 :
113 14 : if (spdk_unlikely(accel_sequence != NULL)) {
114 0 : SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
115 0 : *rc = -EINVAL;
116 0 : return NULL;
117 : }
118 :
119 69 : while (remaining_lba_count > 0) {
120 56 : lba_count = sectors_per_max_io - (lba & sector_mask);
121 56 : lba_count = spdk_min(remaining_lba_count, lba_count);
122 :
123 56 : child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
124 : lba, lba_count, cb_fn, cb_arg, opc,
125 : io_flags, apptag_mask, apptag, cdw13, req, true, rc);
126 56 : if (child == NULL) {
127 1 : return NULL;
128 : }
129 :
130 55 : remaining_lba_count -= lba_count;
131 55 : lba += lba_count;
132 55 : payload_offset += lba_count * sector_size;
133 55 : md_offset += lba_count * ns->md_size;
134 : }
135 :
136 13 : return req;
137 : }
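/*
 * Worked example of the split loop above (illustrative values): with
 * sectors_per_max_io = 256 and sector_mask = 0 (plain max-transfer split),
 * a parent request for lba = 10, lba_count = 600 becomes three children:
 * (lba 10, 256 blocks), (lba 266, 256 blocks), (lba 522, 88 blocks).
 * With driver-assisted striping, sector_mask = sectors_per_stripe - 1 = 255,
 * so the first child is trimmed to the stripe boundary, 256 - (10 & 255) = 246
 * blocks, and the later children start stripe aligned.
 */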
138 :
139 : static inline bool
140 141 : _is_io_flags_valid(uint32_t io_flags)
141 : {
142 141 : if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
143 : /* Invalid io_flags */
144 3 : SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
145 3 : return false;
146 : }
147 :
148 138 : return true;
149 : }
150 :
151 : static inline bool
152 2 : _is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
153 : {
154 : /* An accel sequence can only be executed if the controller supports accel and a qpair is
155 : * part of a poll group */
156 2 : return seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
157 0 : qpair->poll_group != NULL);
158 : }
159 :
160 : static void
161 85 : _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
162 : uint32_t opc, uint64_t lba, uint32_t lba_count,
163 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
164 : uint32_t cdw13)
165 : {
166 : struct spdk_nvme_cmd *cmd;
167 :
168 85 : assert(_is_io_flags_valid(io_flags));
169 :
170 85 : cmd = &req->cmd;
171 85 : cmd->opc = opc;
172 85 : cmd->nsid = ns->id;
173 :
174 85 : *(uint64_t *)&cmd->cdw10 = lba;
175 :
176 85 : if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
177 13 : switch (ns->pi_type) {
178 1 : case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
179 : case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
180 1 : cmd->cdw14 = (uint32_t)lba;
181 1 : break;
182 : }
183 72 : }
184 :
185 85 : cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
186 :
187 85 : cmd->cdw12 = lba_count - 1;
188 85 : cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
189 :
190 85 : cmd->cdw13 = cdw13;
191 :
192 85 : cmd->cdw15 = apptag_mask;
193 85 : cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
194 85 : }
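/*
 * Worked example of the dword packing above (illustrative values): for
 * lba = 0x1000, lba_count = 8, apptag = 0x1234 and apptag_mask = 0x00ff, the
 * submission queue entry ends up with cdw10/cdw11 = 0x1000 (64-bit starting LBA),
 * cdw12 = 0x7 (zero-based block count, before io_flags bits are OR-ed in) and
 * cdw15 = 0x00ff1234 (application tag mask in the upper 16 bits, application tag
 * in the lower 16 bits).
 */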
195 :
196 : static struct nvme_request *
197 18 : _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
198 : struct spdk_nvme_qpair *qpair,
199 : const struct nvme_payload *payload,
200 : uint32_t payload_offset, uint32_t md_offset,
201 : uint64_t lba, uint32_t lba_count,
202 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
203 : uint32_t io_flags, struct nvme_request *req,
204 : uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
205 : void *accel_sequence, int *rc)
206 : {
207 18 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
208 18 : spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
209 18 : void *sgl_cb_arg = req->payload.contig_or_cb_arg;
210 : bool start_valid, end_valid, last_sge, child_equals_parent;
211 18 : uint64_t child_lba = lba;
212 18 : uint32_t req_current_length = 0;
213 18 : uint32_t child_length = 0;
214 18 : uint32_t sge_length;
215 18 : uint32_t page_size = qpair->ctrlr->page_size;
216 18 : uintptr_t address;
217 :
218 18 : reset_sgl_fn(sgl_cb_arg, payload_offset);
219 18 : next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
220 36 : while (req_current_length < req->payload_size) {
221 :
222 19 : if (sge_length == 0) {
223 0 : continue;
224 19 : } else if (req_current_length + sge_length > req->payload_size) {
225 5 : sge_length = req->payload_size - req_current_length;
226 : }
227 :
228 : /*
229 : * The start of the SGE is invalid if the start address is not page aligned,
230 : * unless it is the first SGE in the child request.
231 : */
232 19 : start_valid = child_length == 0 || _is_page_aligned(address, page_size);
233 :
234 : /* Boolean for whether this is the last SGE in the parent request. */
235 19 : last_sge = (req_current_length + sge_length == req->payload_size);
236 :
237 : /*
238 : * The end of the SGE is invalid if the end address is not page aligned,
239 : * unless it is the last SGE in the parent request.
240 : */
241 19 : end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
242 :
243 : /*
244 : * This child request equals the parent request, meaning that no splitting
245 : * was required for the parent request (the one passed into this function).
246 : * In this case, we do not create a child request at all - we just send
247 : * the original request as a single request at the end of this function.
248 : */
249 19 : child_equals_parent = (child_length + sge_length == req->payload_size);
250 :
251 19 : if (start_valid) {
252 : /*
253 : * The start of the SGE is valid, so advance the length parameters,
254 : * to include this SGE with previous SGEs for this child request
255 : * (if any). If it is not valid, we do not advance the length
256 : * parameters nor get the next SGE, because we must send what has
257 : * been collected before this SGE as a child request.
258 : */
259 19 : child_length += sge_length;
260 19 : req_current_length += sge_length;
261 19 : if (req_current_length < req->payload_size) {
262 2 : next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
263 : /*
264 : * If the next SGE is not page aligned, we will need to create a
265 : * child request for what we have so far, and then start a new
266 : * child request for the next SGE.
267 : */
268 2 : start_valid = _is_page_aligned(address, page_size);
269 : }
270 : }
271 :
272 19 : if (start_valid && end_valid && !last_sge) {
273 1 : continue;
274 : }
275 :
276 : /*
277 : * We need to create a split here. Send what we have accumulated so far as a child
278 : * request. Checking if child_equals_parent allows us to *not* create a child request
279 : * when no splitting is required - in that case we will fall-through and just create
280 : * a single request with no children for the entire I/O.
281 : */
282 18 : if (!child_equals_parent) {
283 : struct nvme_request *child;
284 : uint32_t child_lba_count;
285 :
286 1 : if ((child_length % ns->extended_lba_size) != 0) {
287 1 : SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
288 : child_length, ns->extended_lba_size);
289 1 : *rc = -EINVAL;
290 1 : return NULL;
291 : }
292 0 : if (spdk_unlikely(accel_sequence != NULL)) {
293 0 : SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
294 0 : *rc = -EINVAL;
295 0 : return NULL;
296 : }
297 :
298 0 : child_lba_count = child_length / ns->extended_lba_size;
299 : /*
300 : * Note the last parameter is set to "false" - this tells the recursive
301 : * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
302 : * since we have already verified it here.
303 : */
304 0 : child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
305 : child_lba, child_lba_count,
306 : cb_fn, cb_arg, opc, io_flags,
307 : apptag_mask, apptag, cdw13, req, false, rc);
308 0 : if (child == NULL) {
309 0 : return NULL;
310 : }
311 0 : payload_offset += child_length;
312 0 : md_offset += child_lba_count * ns->md_size;
313 0 : child_lba += child_lba_count;
314 0 : child_length = 0;
315 : }
316 : }
317 :
318 17 : if (child_length == req->payload_size) {
319 : /* No splitting was required, so setup the whole payload as one request. */
320 17 : _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
321 : }
322 :
323 17 : return req;
324 : }
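/*
 * Example of the PRP alignment rule above (page_size = 4096, 512-byte blocks):
 * an SGL of two elements, 6144 bytes at a page-aligned address followed by
 * 2048 bytes elsewhere, must be split after the first element, because that
 * element ends at a non page-aligned offset and is not the last SGE.  The
 * result is one child of 12 blocks and one child of 4 blocks.
 */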
325 :
326 : static struct nvme_request *
327 0 : _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
328 : struct spdk_nvme_qpair *qpair,
329 : const struct nvme_payload *payload,
330 : uint32_t payload_offset, uint32_t md_offset,
331 : uint64_t lba, uint32_t lba_count,
332 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
333 : uint32_t io_flags, struct nvme_request *req,
334 : uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
335 : void *accel_sequence, int *rc)
336 : {
337 0 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
338 0 : spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
339 0 : void *sgl_cb_arg = req->payload.contig_or_cb_arg;
340 0 : uint64_t child_lba = lba;
341 0 : uint32_t req_current_length = 0;
342 0 : uint32_t child_length = 0;
343 0 : uint32_t sge_length;
344 : uint16_t max_sges, num_sges;
345 0 : uintptr_t address;
346 :
347 0 : max_sges = ns->ctrlr->max_sges;
348 :
349 0 : reset_sgl_fn(sgl_cb_arg, payload_offset);
350 0 : num_sges = 0;
351 :
352 0 : while (req_current_length < req->payload_size) {
353 0 : next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
354 :
355 0 : if (req_current_length + sge_length > req->payload_size) {
356 0 : sge_length = req->payload_size - req_current_length;
357 : }
358 :
359 0 : child_length += sge_length;
360 0 : req_current_length += sge_length;
361 0 : num_sges++;
362 :
363 0 : if (num_sges < max_sges && req_current_length < req->payload_size) {
364 0 : continue;
365 : }
366 :
367 : /*
368 : * We need to create a split here. Send what we have accumulated so far as a child
369 : * request. Checking if the child equals the full payload allows us to *not*
370 : * create a child request when no splitting is required - in that case we will
371 : * fall-through and just create a single request with no children for the entire I/O.
372 : */
373 0 : if (child_length != req->payload_size) {
374 : struct nvme_request *child;
375 : uint32_t child_lba_count;
376 :
377 0 : if ((child_length % ns->extended_lba_size) != 0) {
378 0 : SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
379 : child_length, ns->extended_lba_size);
380 0 : *rc = -EINVAL;
381 0 : return NULL;
382 : }
383 0 : if (spdk_unlikely(accel_sequence != NULL)) {
384 0 : SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
385 0 : *rc = -EINVAL;
386 0 : return NULL;
387 : }
388 :
389 0 : child_lba_count = child_length / ns->extended_lba_size;
390 : /*
391 : * Note the last parameter is set to "false" - this tells the recursive
392 : * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
393 : * since we have already verified it here.
394 : */
395 0 : child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
396 : child_lba, child_lba_count,
397 : cb_fn, cb_arg, opc, io_flags,
398 : apptag_mask, apptag, cdw13, req, false, rc);
399 0 : if (child == NULL) {
400 0 : return NULL;
401 : }
402 0 : payload_offset += child_length;
403 0 : md_offset += child_lba_count * ns->md_size;
404 0 : child_lba += child_lba_count;
405 0 : child_length = 0;
406 0 : num_sges = 0;
407 : }
408 : }
409 :
410 0 : if (child_length == req->payload_size) {
411 : /* No splitting was required, so setup the whole payload as one request. */
412 0 : _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
413 : }
414 :
415 0 : return req;
416 : }
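/*
 * Example of the SGE-count rule above: with max_sges = 2, a payload described
 * by three SGEs is sent as two child requests, one carrying the first two SGEs
 * and one carrying the third.  Each child's length must still be a whole
 * multiple of the extended LBA size, otherwise the split fails with -EINVAL.
 */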
417 :
418 : static inline struct nvme_request *
419 100 : _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
420 : const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
421 : uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
422 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
423 : void *accel_sequence, int *rc)
424 : {
425 : struct nvme_request *req;
426 100 : uint32_t sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
427 100 : uint32_t sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
428 100 : uint32_t sectors_per_stripe = ns->sectors_per_stripe;
429 :
430 100 : assert(rc != NULL);
431 100 : assert(*rc == 0);
432 :
433 100 : req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
434 : cb_fn, cb_arg);
435 100 : if (req == NULL) {
436 1 : *rc = -ENOMEM;
437 1 : return NULL;
438 : }
439 :
440 99 : req->payload_offset = payload_offset;
441 99 : req->md_offset = md_offset;
442 99 : req->accel_sequence = accel_sequence;
443 :
444 : /* Zone append commands cannot be split. */
445 99 : if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
446 3 : assert(ns->csi == SPDK_NVME_CSI_ZNS);
447 : /*
448 : * As long as we disable driver-assisted striping for Zone append commands,
449 : * _nvme_ns_cmd_rw() should never cause a proper request to be split.
450 : * If a request does end up being split, the callers handle the error.
451 : */
452 3 : sectors_per_stripe = 0;
453 : }
454 :
455 : /*
456 : * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
457 : * If this controller defines a stripe boundary and this I/O spans a stripe
458 : * boundary, split the request into multiple requests and submit each
459 : * separately to hardware.
460 : */
461 99 : if (sectors_per_stripe > 0 &&
462 7 : (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
463 1 : return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
464 : cb_fn,
465 : cb_arg, opc,
466 : io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
467 : apptag_mask, apptag, cdw13, accel_sequence, rc);
468 98 : } else if (lba_count > sectors_per_max_io) {
469 13 : return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
470 : cb_fn,
471 : cb_arg, opc,
472 : io_flags, req, sectors_per_max_io, 0, apptag_mask,
473 : apptag, cdw13, accel_sequence, rc);
474 85 : } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
475 18 : if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
476 0 : return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
477 : lba, lba_count, cb_fn, cb_arg, opc, io_flags,
478 : req, apptag_mask, apptag, cdw13,
479 : accel_sequence, rc);
480 : } else {
481 18 : return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
482 : lba, lba_count, cb_fn, cb_arg, opc, io_flags,
483 : req, apptag_mask, apptag, cdw13,
484 : accel_sequence, rc);
485 : }
486 : }
487 :
488 67 : _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
489 67 : return req;
490 : }
491 :
492 : int
493 1 : spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
494 : uint64_t lba,
495 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
496 : uint32_t io_flags)
497 : {
498 : struct nvme_request *req;
499 1 : struct nvme_payload payload;
500 1 : int rc = 0;
501 :
502 1 : if (!_is_io_flags_valid(io_flags)) {
503 0 : return -EINVAL;
504 : }
505 :
506 1 : payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
507 :
508 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
509 : SPDK_NVME_OPC_COMPARE,
510 : io_flags, 0,
511 : 0, 0, false, NULL, &rc);
512 1 : if (req != NULL) {
513 1 : return nvme_qpair_submit_request(qpair, req);
514 : } else {
515 0 : return nvme_ns_map_failure_rc(lba_count,
516 : ns->sectors_per_max_io,
517 : ns->sectors_per_stripe,
518 0 : qpair->ctrlr->opts.io_queue_requests,
519 : rc);
520 : }
521 : }
522 :
523 : int
524 6 : spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
525 : void *buffer,
526 : void *metadata,
527 : uint64_t lba,
528 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
529 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
530 : {
531 : struct nvme_request *req;
532 6 : struct nvme_payload payload;
533 6 : int rc = 0;
534 :
535 6 : if (!_is_io_flags_valid(io_flags)) {
536 0 : return -EINVAL;
537 : }
538 :
539 6 : payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
540 :
541 6 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
542 : SPDK_NVME_OPC_COMPARE,
543 : io_flags,
544 : apptag_mask, apptag, 0, false, NULL, &rc);
545 6 : if (req != NULL) {
546 6 : return nvme_qpair_submit_request(qpair, req);
547 : } else {
548 0 : return nvme_ns_map_failure_rc(lba_count,
549 : ns->sectors_per_max_io,
550 : ns->sectors_per_stripe,
551 0 : qpair->ctrlr->opts.io_queue_requests,
552 : rc);
553 : }
554 : }
555 :
556 : int
557 2 : spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
558 : uint64_t lba, uint32_t lba_count,
559 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
560 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
561 : spdk_nvme_req_next_sge_cb next_sge_fn)
562 : {
563 : struct nvme_request *req;
564 2 : struct nvme_payload payload;
565 2 : int rc = 0;
566 :
567 2 : if (!_is_io_flags_valid(io_flags)) {
568 0 : return -EINVAL;
569 : }
570 :
571 2 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
572 1 : return -EINVAL;
573 : }
574 :
575 1 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
576 :
577 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
578 : SPDK_NVME_OPC_COMPARE,
579 : io_flags, 0, 0, 0, true, NULL, &rc);
580 1 : if (req != NULL) {
581 1 : return nvme_qpair_submit_request(qpair, req);
582 : } else {
583 0 : return nvme_ns_map_failure_rc(lba_count,
584 : ns->sectors_per_max_io,
585 : ns->sectors_per_stripe,
586 0 : qpair->ctrlr->opts.io_queue_requests,
587 : rc);
588 : }
589 : }
590 :
591 : int
592 6 : spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
593 : uint64_t lba, uint32_t lba_count,
594 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
595 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
596 : spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
597 : uint16_t apptag_mask, uint16_t apptag)
598 : {
599 : struct nvme_request *req;
600 6 : struct nvme_payload payload;
601 6 : int rc = 0;
602 :
603 6 : if (!_is_io_flags_valid(io_flags)) {
604 0 : return -EINVAL;
605 : }
606 :
607 6 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
608 0 : return -EINVAL;
609 : }
610 :
611 6 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
612 :
613 6 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
614 : SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
615 : NULL, &rc);
616 6 : if (req != NULL) {
617 6 : return nvme_qpair_submit_request(qpair, req);
618 : } else {
619 0 : return nvme_ns_map_failure_rc(lba_count,
620 : ns->sectors_per_max_io,
621 : ns->sectors_per_stripe,
622 0 : qpair->ctrlr->opts.io_queue_requests,
623 : rc);
624 : }
625 : }
626 :
627 : int
628 10 : spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
629 : uint64_t lba,
630 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
631 : uint32_t io_flags)
632 : {
633 : struct nvme_request *req;
634 10 : struct nvme_payload payload;
635 10 : int rc = 0;
636 :
637 10 : if (!_is_io_flags_valid(io_flags)) {
638 0 : return -EINVAL;
639 : }
640 :
641 10 : payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
642 :
643 10 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
644 : io_flags, 0,
645 : 0, 0, false, NULL, &rc);
646 10 : if (req != NULL) {
647 9 : return nvme_qpair_submit_request(qpair, req);
648 : } else {
649 1 : return nvme_ns_map_failure_rc(lba_count,
650 : ns->sectors_per_max_io,
651 : ns->sectors_per_stripe,
652 1 : qpair->ctrlr->opts.io_queue_requests,
653 : rc);
654 : }
655 : }
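/*
 * Hedged usage sketch (not part of the driver): submit a single read with
 * spdk_nvme_ns_cmd_read() and poll the queue pair until it completes.  The
 * example_* names are hypothetical helpers for illustration only.
 */
struct example_read_ctx {
	bool done;
};

static void
example_read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct example_read_ctx *ctx = cb_arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("read failed: sct 0x%x sc 0x%x\n",
			    cpl->status.sct, cpl->status.sc);
	}
	ctx->done = true;
}

static int
example_read_sync(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		  void *buf, uint64_t lba, uint32_t lba_count)
{
	struct example_read_ctx ctx = { .done = false };
	int rc;

	rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, lba, lba_count,
				   example_read_done, &ctx, 0);
	if (rc == -ENOMEM) {
		/* Transient: no free request objects; process completions and retry later. */
		return rc;
	} else if (rc != 0) {
		/* e.g. -EINVAL from nvme_ns_map_failure_rc(): the I/O can never fit
		 * in the queue, or the io_flags were invalid. */
		return rc;
	}

	while (!ctx.done) {
		spdk_nvme_qpair_process_completions(qpair, 0);
	}
	return 0;
}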
656 :
657 : int
658 1 : spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
659 : void *metadata,
660 : uint64_t lba,
661 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
662 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
663 : {
664 : struct nvme_request *req;
665 1 : struct nvme_payload payload;
666 1 : int rc = 0;
667 :
668 1 : if (!_is_io_flags_valid(io_flags)) {
669 0 : return -EINVAL;
670 : }
671 :
672 1 : payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
673 :
674 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
675 : io_flags,
676 : apptag_mask, apptag, 0, false, NULL, &rc);
677 1 : if (req != NULL) {
678 1 : return nvme_qpair_submit_request(qpair, req);
679 : } else {
680 0 : return nvme_ns_map_failure_rc(lba_count,
681 : ns->sectors_per_max_io,
682 : ns->sectors_per_stripe,
683 0 : qpair->ctrlr->opts.io_queue_requests,
684 : rc);
685 : }
686 : }
687 :
688 : int
689 2 : spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
690 : uint64_t lba, uint32_t lba_count,
691 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
692 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
693 : spdk_nvme_req_next_sge_cb next_sge_fn)
694 : {
695 : struct nvme_request *req;
696 2 : struct nvme_payload payload;
697 2 : int rc = 0;
698 :
699 2 : if (!_is_io_flags_valid(io_flags)) {
700 0 : return -EINVAL;
701 : }
702 :
703 2 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
704 1 : return -EINVAL;
705 : }
706 :
707 1 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
708 :
709 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
710 : io_flags, 0, 0, 0, true, NULL, &rc);
711 1 : if (req != NULL) {
712 1 : return nvme_qpair_submit_request(qpair, req);
713 : } else {
714 0 : return nvme_ns_map_failure_rc(lba_count,
715 : ns->sectors_per_max_io,
716 : ns->sectors_per_stripe,
717 0 : qpair->ctrlr->opts.io_queue_requests,
718 : rc);
719 : }
720 : }
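/*
 * Hedged sketch of the reset_sgl_fn/next_sge_fn contract used by the vectored
 * variants above: the callbacks iterate over a caller-owned scatter list.  The
 * cb_arg passed to spdk_nvme_ns_cmd_readv() is handed to both callbacks (and to
 * the completion callback).  The example_iov_ctx type below is hypothetical;
 * struct iovec comes from sys/uio.h via spdk/stdinc.h.
 */
struct example_iov_ctx {
	struct iovec	*iov;
	int		iovcnt;
	int		cur;		/* index of the SGE being consumed */
	uint32_t	cur_offset;	/* bytes already consumed from iov[cur] */
};

static void
example_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct example_iov_ctx *ctx = cb_arg;

	ctx->cur = 0;
	while (ctx->cur < ctx->iovcnt && offset >= ctx->iov[ctx->cur].iov_len) {
		offset -= ctx->iov[ctx->cur].iov_len;
		ctx->cur++;
	}
	ctx->cur_offset = offset;
}

static int
example_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct example_iov_ctx *ctx = cb_arg;
	struct iovec *iov = &ctx->iov[ctx->cur];

	*address = (uint8_t *)iov->iov_base + ctx->cur_offset;
	*length = iov->iov_len - ctx->cur_offset;
	ctx->cur_offset = 0;
	ctx->cur++;
	return 0;
}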
721 :
722 : int
723 2 : spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
724 : uint64_t lba, uint32_t lba_count,
725 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
726 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
727 : spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
728 : uint16_t apptag_mask, uint16_t apptag)
729 : {
730 : struct nvme_request *req;
731 2 : struct nvme_payload payload;
732 2 : int rc = 0;
733 :
734 2 : if (!_is_io_flags_valid(io_flags)) {
735 0 : return -EINVAL;
736 : }
737 :
738 2 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
739 1 : return -EINVAL;
740 : }
741 :
742 1 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
743 :
744 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
745 : io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
746 1 : if (req != NULL) {
747 1 : return nvme_qpair_submit_request(qpair, req);
748 : } else {
749 0 : return nvme_ns_map_failure_rc(lba_count,
750 : ns->sectors_per_max_io,
751 : ns->sectors_per_stripe,
752 0 : qpair->ctrlr->opts.io_queue_requests,
753 : rc);
754 : }
755 : }
756 :
757 : int
758 4 : spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
759 : uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
760 : void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
761 : spdk_nvme_req_next_sge_cb next_sge_fn,
762 : struct spdk_nvme_ns_cmd_ext_io_opts *opts)
763 : {
764 : struct nvme_request *req;
765 4 : struct nvme_payload payload;
766 : void *seq;
767 4 : int rc = 0;
768 :
769 4 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
770 2 : return -EINVAL;
771 : }
772 :
773 2 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
774 :
775 2 : if (opts) {
776 2 : if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
777 1 : return -EINVAL;
778 : }
779 :
780 1 : seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
781 1 : if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
782 0 : return -EINVAL;
783 : }
784 :
785 1 : payload.opts = opts;
786 1 : payload.md = opts->metadata;
787 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
788 1 : opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);
789 :
790 : } else {
791 0 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
792 : 0, 0, 0, 0, true, NULL, &rc);
793 : }
794 :
795 1 : if (req != NULL) {
796 1 : return nvme_qpair_submit_request(qpair, req);
797 : } else {
798 0 : return nvme_ns_map_failure_rc(lba_count,
799 : ns->sectors_per_max_io,
800 : ns->sectors_per_stripe,
801 0 : qpair->ctrlr->opts.io_queue_requests,
802 : rc);
803 : }
804 : }
805 :
806 : int
807 3 : spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
808 : void *buffer, uint64_t lba,
809 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
810 : uint32_t io_flags)
811 : {
812 : struct nvme_request *req;
813 3 : struct nvme_payload payload;
814 3 : int rc = 0;
815 :
816 3 : if (!_is_io_flags_valid(io_flags)) {
817 1 : return -EINVAL;
818 : }
819 :
820 2 : payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
821 :
822 2 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
823 : io_flags, 0, 0, 0, false, NULL, &rc);
824 2 : if (req != NULL) {
825 2 : return nvme_qpair_submit_request(qpair, req);
826 : } else {
827 0 : return nvme_ns_map_failure_rc(lba_count,
828 : ns->sectors_per_max_io,
829 : ns->sectors_per_stripe,
830 0 : qpair->ctrlr->opts.io_queue_requests,
831 : rc);
832 : }
833 : }
834 :
835 : static int
836 6 : nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
837 : {
838 : uint32_t sector_size;
839 :
840 : /* Not all NVMe Zoned Namespaces support the zone append command. */
841 6 : if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
842 0 : return -EINVAL;
843 : }
844 :
845 6 : sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
846 :
847 : /* Fail a too large zone append command early. */
848 6 : if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
849 3 : return -EINVAL;
850 : }
851 :
852 3 : return 0;
853 : }
854 :
855 : int
856 4 : nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
857 : void *buffer, void *metadata, uint64_t zslba,
858 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
859 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
860 : {
861 : struct nvme_request *req;
862 4 : struct nvme_payload payload;
863 4 : int rc = 0;
864 :
865 4 : if (!_is_io_flags_valid(io_flags)) {
866 0 : return -EINVAL;
867 : }
868 :
869 4 : rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
870 4 : if (rc) {
871 2 : return rc;
872 : }
873 :
874 2 : payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
875 :
876 2 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
877 : SPDK_NVME_OPC_ZONE_APPEND,
878 : io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
879 2 : if (req != NULL) {
880 : /*
881 : * Zone append commands cannot be split (num_children has to be 0).
882 : * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
883 : * to happen, because a request that is too large would already have been
884 : * rejected by nvme_ns_cmd_check_zone_append(), given that zasl <= mdts.
885 : */
886 2 : assert(req->num_children == 0);
887 2 : if (req->num_children) {
888 0 : nvme_request_free_children(req);
889 0 : nvme_free_request(req);
890 0 : return -EINVAL;
891 : }
892 2 : return nvme_qpair_submit_request(qpair, req);
893 : } else {
894 0 : return nvme_ns_map_failure_rc(lba_count,
895 : ns->sectors_per_max_io,
896 : ns->sectors_per_stripe,
897 0 : qpair->ctrlr->opts.io_queue_requests,
898 : rc);
899 : }
900 : }
901 :
902 : int
903 2 : nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
904 : uint64_t zslba, uint32_t lba_count,
905 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
906 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
907 : spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
908 : uint16_t apptag_mask, uint16_t apptag)
909 : {
910 : struct nvme_request *req;
911 2 : struct nvme_payload payload;
912 2 : int rc = 0;
913 :
914 2 : if (!_is_io_flags_valid(io_flags)) {
915 0 : return -EINVAL;
916 : }
917 :
918 2 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
919 0 : return -EINVAL;
920 : }
921 :
922 2 : rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
923 2 : if (rc) {
924 1 : return rc;
925 : }
926 :
927 1 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
928 :
929 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
930 : SPDK_NVME_OPC_ZONE_APPEND,
931 : io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
932 1 : if (req != NULL) {
933 : /*
934 : * Zone append commands cannot be split (num_children has to be 0).
935 : * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
936 : * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
937 : * do not always cause a request to be split. These functions verify payload size,
938 : * verify num sge < max_sge, and verify SGE alignment rules (in case of PRPs).
939 : * If any of the verifications fail, they will split the request.
940 : * In our case a split is very unlikely, since we already verified the size using
941 : * nvme_ns_cmd_check_zone_append(); however, we still need to call these functions
942 : * to perform the verification part. If they do cause a split, we return
943 : * an error here. For proper requests, these functions will never cause a split.
944 : */
945 1 : if (req->num_children) {
946 0 : nvme_request_free_children(req);
947 0 : nvme_free_request(req);
948 0 : return -EINVAL;
949 : }
950 1 : return nvme_qpair_submit_request(qpair, req);
951 : } else {
952 0 : return nvme_ns_map_failure_rc(lba_count,
953 : ns->sectors_per_max_io,
954 : ns->sectors_per_stripe,
955 0 : qpair->ctrlr->opts.io_queue_requests,
956 : rc);
957 : }
958 : }
959 :
960 : int
961 7 : spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
962 : void *buffer, void *metadata, uint64_t lba,
963 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
964 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
965 : {
966 : struct nvme_request *req;
967 7 : struct nvme_payload payload;
968 7 : int rc = 0;
969 :
970 7 : if (!_is_io_flags_valid(io_flags)) {
971 0 : return -EINVAL;
972 : }
973 :
974 7 : payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
975 :
976 7 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
977 : io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
978 7 : if (req != NULL) {
979 7 : return nvme_qpair_submit_request(qpair, req);
980 : } else {
981 0 : return nvme_ns_map_failure_rc(lba_count,
982 : ns->sectors_per_max_io,
983 : ns->sectors_per_stripe,
984 0 : qpair->ctrlr->opts.io_queue_requests,
985 : rc);
986 : }
987 : }
988 :
989 : int
990 4 : spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
991 : uint64_t lba, uint32_t lba_count,
992 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
993 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
994 : spdk_nvme_req_next_sge_cb next_sge_fn)
995 : {
996 : struct nvme_request *req;
997 4 : struct nvme_payload payload;
998 4 : int rc = 0;
999 :
1000 4 : if (!_is_io_flags_valid(io_flags)) {
1001 0 : return -EINVAL;
1002 : }
1003 :
1004 4 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1005 1 : return -EINVAL;
1006 : }
1007 :
1008 3 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
1009 :
1010 3 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1011 : io_flags, 0, 0, 0, true, NULL, &rc);
1012 3 : if (req != NULL) {
1013 2 : return nvme_qpair_submit_request(qpair, req);
1014 : } else {
1015 1 : return nvme_ns_map_failure_rc(lba_count,
1016 : ns->sectors_per_max_io,
1017 : ns->sectors_per_stripe,
1018 1 : qpair->ctrlr->opts.io_queue_requests,
1019 : rc);
1020 : }
1021 : }
1022 :
1023 : int
1024 0 : spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1025 : uint64_t lba, uint32_t lba_count,
1026 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1027 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1028 : spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1029 : uint16_t apptag_mask, uint16_t apptag)
1030 : {
1031 : struct nvme_request *req;
1032 0 : struct nvme_payload payload;
1033 0 : int rc = 0;
1034 :
1035 0 : if (!_is_io_flags_valid(io_flags)) {
1036 0 : return -EINVAL;
1037 : }
1038 :
1039 0 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1040 0 : return -EINVAL;
1041 : }
1042 :
1043 0 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1044 :
1045 0 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1046 : io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
1047 0 : if (req != NULL) {
1048 0 : return nvme_qpair_submit_request(qpair, req);
1049 : } else {
1050 0 : return nvme_ns_map_failure_rc(lba_count,
1051 : ns->sectors_per_max_io,
1052 : ns->sectors_per_stripe,
1053 0 : qpair->ctrlr->opts.io_queue_requests,
1054 : rc);
1055 : }
1056 : }
1057 :
1058 : int
1059 4 : spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
1060 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1061 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1062 : spdk_nvme_req_next_sge_cb next_sge_fn,
1063 : struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1064 : {
1065 : struct nvme_request *req;
1066 4 : struct nvme_payload payload;
1067 : void *seq;
1068 4 : int rc = 0;
1069 :
1070 4 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1071 2 : return -EINVAL;
1072 : }
1073 :
1074 2 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
1075 :
1076 2 : if (opts) {
1077 2 : if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
1078 1 : return -EINVAL;
1079 : }
1080 :
1081 1 : seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
1082 1 : if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
1083 0 : return -EINVAL;
1084 : }
1085 :
1086 1 : payload.opts = opts;
1087 1 : payload.md = opts->metadata;
1088 1 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1089 1 : opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);
1090 :
1091 : } else {
1092 0 : req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1093 : 0, 0, 0, 0, true, NULL, &rc);
1094 : }
1095 :
1096 1 : if (req != NULL) {
1097 1 : return nvme_qpair_submit_request(qpair, req);
1098 : } else {
1099 0 : return nvme_ns_map_failure_rc(lba_count,
1100 : ns->sectors_per_max_io,
1101 : ns->sectors_per_stripe,
1102 0 : qpair->ctrlr->opts.io_queue_requests,
1103 : rc);
1104 : }
1105 : }
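/*
 * Hedged sketch of building the extended-options structure consumed by the
 * _ext variants above.  Only fields referenced in this file are shown; `size`
 * tells the driver how much of the (growing) structure the caller filled in.
 */
static int
example_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		   uint64_t lba, uint32_t lba_count,
		   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		   spdk_nvme_req_next_sge_cb next_sge_fn,
		   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
		.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts),
		.io_flags = 0,		/* must stay within SPDK_NVME_IO_FLAGS_VALID_MASK */
		.metadata = NULL,	/* separate metadata buffer, if any */
	};

	return spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg,
					   reset_sgl_fn, next_sge_fn, &ext_opts);
}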
1106 :
1107 : int
1108 1 : spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1109 : uint64_t lba, uint32_t lba_count,
1110 : spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1111 : uint32_t io_flags)
1112 : {
1113 : struct nvme_request *req;
1114 : struct spdk_nvme_cmd *cmd;
1115 : uint64_t *tmp_lba;
1116 :
1117 1 : if (!_is_io_flags_valid(io_flags)) {
1118 0 : return -EINVAL;
1119 : }
1120 :
1121 1 : if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1122 0 : return -EINVAL;
1123 : }
1124 :
1125 1 : req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1126 1 : if (req == NULL) {
1127 0 : return -ENOMEM;
1128 : }
1129 :
1130 1 : cmd = &req->cmd;
1131 1 : cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
1132 1 : cmd->nsid = ns->id;
1133 :
1134 1 : tmp_lba = (uint64_t *)&cmd->cdw10;
1135 1 : *tmp_lba = lba;
1136 1 : cmd->cdw12 = lba_count - 1;
1137 1 : cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1138 1 : cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
1139 :
1140 1 : return nvme_qpair_submit_request(qpair, req);
1141 : }
1142 :
1143 : int
1144 1 : spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1145 : uint64_t lba, uint32_t lba_count,
1146 : spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1147 : uint32_t io_flags)
1148 : {
1149 : struct nvme_request *req;
1150 : struct spdk_nvme_cmd *cmd;
1151 :
1152 1 : if (!_is_io_flags_valid(io_flags)) {
1153 0 : return -EINVAL;
1154 : }
1155 :
1156 1 : if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1157 0 : return -EINVAL;
1158 : }
1159 :
1160 1 : req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1161 1 : if (req == NULL) {
1162 0 : return -ENOMEM;
1163 : }
1164 :
1165 1 : cmd = &req->cmd;
1166 1 : cmd->opc = SPDK_NVME_OPC_VERIFY;
1167 1 : cmd->nsid = ns->id;
1168 :
1169 1 : *(uint64_t *)&cmd->cdw10 = lba;
1170 1 : cmd->cdw12 = lba_count - 1;
1171 1 : cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1172 1 : cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
1173 :
1174 1 : return nvme_qpair_submit_request(qpair, req);
1175 : }
1176 :
1177 : int
1178 1 : spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1179 : uint64_t lba, uint32_t lba_count,
1180 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1181 : {
1182 : struct nvme_request *req;
1183 : struct spdk_nvme_cmd *cmd;
1184 : uint64_t *tmp_lba;
1185 :
1186 1 : if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1187 0 : return -EINVAL;
1188 : }
1189 :
1190 1 : req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1191 1 : if (req == NULL) {
1192 0 : return -ENOMEM;
1193 : }
1194 :
1195 1 : cmd = &req->cmd;
1196 1 : cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
1197 1 : cmd->nsid = ns->id;
1198 :
1199 1 : tmp_lba = (uint64_t *)&cmd->cdw10;
1200 1 : *tmp_lba = lba;
1201 1 : cmd->cdw12 = lba_count - 1;
1202 :
1203 1 : return nvme_qpair_submit_request(qpair, req);
1204 : }
1205 :
1206 : int
1207 3 : spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1208 : uint32_t type,
1209 : const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1210 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1211 : {
1212 : struct nvme_request *req;
1213 : struct spdk_nvme_cmd *cmd;
1214 :
1215 3 : if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
1216 1 : return -EINVAL;
1217 : }
1218 :
1219 2 : if (ranges == NULL) {
1220 0 : return -EINVAL;
1221 : }
1222 :
1223 2 : req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
1224 : num_ranges * sizeof(struct spdk_nvme_dsm_range),
1225 : cb_fn, cb_arg, true);
1226 2 : if (req == NULL) {
1227 0 : return -ENOMEM;
1228 : }
1229 :
1230 2 : cmd = &req->cmd;
1231 2 : cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1232 2 : cmd->nsid = ns->id;
1233 :
1234 2 : cmd->cdw10_bits.dsm.nr = num_ranges - 1;
1235 2 : cmd->cdw11 = type;
1236 :
1237 2 : return nvme_qpair_submit_request(qpair, req);
1238 : }
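/*
 * Hedged sketch of a single-range deallocate (unmap) built for the call above.
 * The ranges are copied by nvme_allocate_request_user_copy(), so the caller's
 * array only needs to stay valid for the duration of this call.
 */
static int
example_deallocate(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		   uint64_t lba, uint32_t lba_count,
		   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_dsm_range range = {
		.starting_lba = lba,
		.length = lba_count,	/* in logical blocks */
	};

	return spdk_nvme_ns_cmd_dataset_management(ns, qpair,
						   SPDK_NVME_DSM_ATTR_DEALLOCATE,
						   &range, 1, cb_fn, cb_arg);
}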
1239 :
1240 : int
1241 3 : spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1242 : const struct spdk_nvme_scc_source_range *ranges,
1243 : uint16_t num_ranges, uint64_t dest_lba,
1244 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1245 : {
1246 : struct nvme_request *req;
1247 : struct spdk_nvme_cmd *cmd;
1248 :
1249 3 : if (num_ranges == 0) {
1250 1 : return -EINVAL;
1251 : }
1252 :
1253 2 : if (ranges == NULL) {
1254 0 : return -EINVAL;
1255 : }
1256 :
1257 2 : req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
1258 : num_ranges * sizeof(struct spdk_nvme_scc_source_range),
1259 : cb_fn, cb_arg, true);
1260 2 : if (req == NULL) {
1261 0 : return -ENOMEM;
1262 : }
1263 :
1264 2 : cmd = &req->cmd;
1265 2 : cmd->opc = SPDK_NVME_OPC_COPY;
1266 2 : cmd->nsid = ns->id;
1267 :
1268 2 : *(uint64_t *)&cmd->cdw10 = dest_lba;
1269 2 : cmd->cdw12 = num_ranges - 1;
1270 :
1271 2 : return nvme_qpair_submit_request(qpair, req);
1272 : }
1273 :
1274 : int
1275 1 : spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1276 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1277 : {
1278 : struct nvme_request *req;
1279 : struct spdk_nvme_cmd *cmd;
1280 :
1281 1 : req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1282 1 : if (req == NULL) {
1283 0 : return -ENOMEM;
1284 : }
1285 :
1286 1 : cmd = &req->cmd;
1287 1 : cmd->opc = SPDK_NVME_OPC_FLUSH;
1288 1 : cmd->nsid = ns->id;
1289 :
1290 1 : return nvme_qpair_submit_request(qpair, req);
1291 : }
1292 :
1293 : int
1294 1 : spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
1295 : struct spdk_nvme_qpair *qpair,
1296 : struct spdk_nvme_reservation_register_data *payload,
1297 : bool ignore_key,
1298 : enum spdk_nvme_reservation_register_action action,
1299 : enum spdk_nvme_reservation_register_cptpl cptpl,
1300 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1301 : {
1302 : struct nvme_request *req;
1303 : struct spdk_nvme_cmd *cmd;
1304 :
1305 1 : req = nvme_allocate_request_user_copy(qpair,
1306 : payload, sizeof(struct spdk_nvme_reservation_register_data),
1307 : cb_fn, cb_arg, true);
1308 1 : if (req == NULL) {
1309 0 : return -ENOMEM;
1310 : }
1311 :
1312 1 : cmd = &req->cmd;
1313 1 : cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
1314 1 : cmd->nsid = ns->id;
1315 :
1316 1 : cmd->cdw10_bits.resv_register.rrega = action;
1317 1 : cmd->cdw10_bits.resv_register.iekey = ignore_key;
1318 1 : cmd->cdw10_bits.resv_register.cptpl = cptpl;
1319 :
1320 1 : return nvme_qpair_submit_request(qpair, req);
1321 : }
1322 :
1323 : int
1324 1 : spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
1325 : struct spdk_nvme_qpair *qpair,
1326 : struct spdk_nvme_reservation_key_data *payload,
1327 : bool ignore_key,
1328 : enum spdk_nvme_reservation_release_action action,
1329 : enum spdk_nvme_reservation_type type,
1330 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1331 : {
1332 : struct nvme_request *req;
1333 : struct spdk_nvme_cmd *cmd;
1334 :
1335 1 : req = nvme_allocate_request_user_copy(qpair,
1336 : payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
1337 : cb_arg, true);
1338 1 : if (req == NULL) {
1339 0 : return -ENOMEM;
1340 : }
1341 :
1342 1 : cmd = &req->cmd;
1343 1 : cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1344 1 : cmd->nsid = ns->id;
1345 :
1346 1 : cmd->cdw10_bits.resv_release.rrela = action;
1347 1 : cmd->cdw10_bits.resv_release.iekey = ignore_key;
1348 1 : cmd->cdw10_bits.resv_release.rtype = type;
1349 :
1350 1 : return nvme_qpair_submit_request(qpair, req);
1351 : }
1352 :
1353 : int
1354 1 : spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
1355 : struct spdk_nvme_qpair *qpair,
1356 : struct spdk_nvme_reservation_acquire_data *payload,
1357 : bool ignore_key,
1358 : enum spdk_nvme_reservation_acquire_action action,
1359 : enum spdk_nvme_reservation_type type,
1360 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1361 : {
1362 : struct nvme_request *req;
1363 : struct spdk_nvme_cmd *cmd;
1364 :
1365 1 : req = nvme_allocate_request_user_copy(qpair,
1366 : payload, sizeof(struct spdk_nvme_reservation_acquire_data),
1367 : cb_fn, cb_arg, true);
1368 1 : if (req == NULL) {
1369 0 : return -ENOMEM;
1370 : }
1371 :
1372 1 : cmd = &req->cmd;
1373 1 : cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
1374 1 : cmd->nsid = ns->id;
1375 :
1376 1 : cmd->cdw10_bits.resv_acquire.racqa = action;
1377 1 : cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
1378 1 : cmd->cdw10_bits.resv_acquire.rtype = type;
1379 :
1380 1 : return nvme_qpair_submit_request(qpair, req);
1381 : }
1382 :
1383 : int
1384 1 : spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
1385 : struct spdk_nvme_qpair *qpair,
1386 : void *payload, uint32_t len,
1387 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1388 : {
1389 : uint32_t num_dwords;
1390 : struct nvme_request *req;
1391 : struct spdk_nvme_cmd *cmd;
1392 :
1393 1 : if (len & 0x3) {
1394 0 : return -EINVAL;
1395 : }
1396 :
1397 1 : req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1398 1 : if (req == NULL) {
1399 0 : return -ENOMEM;
1400 : }
1401 :
1402 1 : cmd = &req->cmd;
1403 1 : cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
1404 1 : cmd->nsid = ns->id;
1405 :
1406 1 : num_dwords = (len >> 2);
1407 1 : cmd->cdw10 = num_dwords - 1; /* 0-based */
1408 :
1409 1 : return nvme_qpair_submit_request(qpair, req);
1410 : }
1411 :
1412 : int
1413 2 : spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1414 : void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1415 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1416 : {
1417 : uint32_t num_dwords;
1418 : struct nvme_request *req;
1419 : struct spdk_nvme_cmd *cmd;
1420 :
1421 2 : if (len & 0x3) {
1422 1 : return -EINVAL;
1423 : }
1424 :
1425 1 : req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1426 1 : if (req == NULL) {
1427 0 : return -ENOMEM;
1428 : }
1429 :
1430 1 : cmd = &req->cmd;
1431 1 : cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
1432 1 : cmd->nsid = ns->id;
1433 :
1434 1 : cmd->cdw10_bits.mgmt_send_recv.mo = mo;
1435 1 : cmd->cdw10_bits.mgmt_send_recv.mos = mos;
1436 :
1437 1 : num_dwords = (len >> 2);
1438 1 : cmd->cdw11 = num_dwords - 1; /* 0-based */
1439 :
1440 1 : return nvme_qpair_submit_request(qpair, req);
1441 : }
1442 :
1443 : int
1444 1 : spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1445 : void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1446 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1447 : {
1448 : struct nvme_request *req;
1449 : struct spdk_nvme_cmd *cmd;
1450 :
1451 1 : req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1452 1 : if (req == NULL) {
1453 0 : return -ENOMEM;
1454 : }
1455 :
1456 1 : cmd = &req->cmd;
1457 1 : cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
1458 1 : cmd->nsid = ns->id;
1459 :
1460 1 : cmd->cdw10_bits.mgmt_send_recv.mo = mo;
1461 1 : cmd->cdw10_bits.mgmt_send_recv.mos = mos;
1462 :
1463 1 : return nvme_qpair_submit_request(qpair, req);
1464 : }
|