LCOV - code coverage report
Current view: top level - lib/nvme - nvme_ns_cmd.c (source / functions)
Test: ut_cov_unit.info                        Hit    Total    Coverage
Date: 2024-12-02 01:40:34     Lines:          626      798      78.4 %
                              Functions:       42       46      91.3 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation.
       3             :  *   All rights reserved.
       4             :  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
       5             :  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       6             :  *   Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
       7             :  */
       8             : 
       9             : #include "nvme_internal.h"
      10             : 
      11             : static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
      12             :                 struct spdk_nvme_qpair *qpair,
      13             :                 const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
      14             :                 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
      15             :                 void *cb_arg, uint32_t opc, uint32_t io_flags,
      16             :                 uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
      17             :                 void *accel_sequence, int *rc);
      18             : 
      19             : static bool
      20           1 : nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
      21             :                              uint32_t sectors_per_stripe, uint32_t qdepth)
      22             : {
      23           1 :         uint32_t child_per_io = UINT32_MAX;
      24             : 
       25             :         /* After a namespace is destroyed (e.g. by hotplug), all the fields associated with
       26             :          * the namespace are cleared to zero. In that case this function returns true,
       27             :          * and -EINVAL is returned to the caller.
      28             :          */
      29           1 :         if (sectors_per_stripe > 0) {
      30           0 :                 child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
      31           1 :         } else if (sectors_per_max_io > 0) {
      32           1 :                 child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
      33           1 :         }
      34             : 
       35           1 :         SPDK_DEBUGLOG(nvme, "checking maximum i/o length %u\n", child_per_io);
      36             : 
      37           2 :         return child_per_io >= qdepth;
      38           1 : }
      39             : 
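The check above is plain ceiling division: the number of child requests a split would produce, compared against the queue depth. A minimal standalone sketch of the same logic (plain C, hypothetical values, no SPDK dependencies), including the hot-removed case where the per-namespace limits read zero:

    #include <stdint.h>
    #include <stdio.h>

    /* Same logic as nvme_ns_check_request_length(), isolated for illustration. */
    static int
    would_exceed_qdepth(uint32_t lba_count, uint32_t sectors_per_max_io, uint32_t qdepth)
    {
            uint32_t child_per_io = UINT32_MAX;

            if (sectors_per_max_io > 0) {
                    /* Ceiling division: how many child requests a split would produce. */
                    child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
            }

            return child_per_io >= qdepth;
    }

    int
    main(void)
    {
            printf("%d\n", would_exceed_qdepth(1024, 256, 128)); /* 0: 4 children fit */
            printf("%d\n", would_exceed_qdepth(1024, 0, 128));   /* 1: zeroed namespace */
            return 0;
    }
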
      40             : static inline int
      41           3 : nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
      42             :                        uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
      43             : {
      44           3 :         assert(rc);
      45           3 :         if (rc == -ENOMEM &&
      46           1 :             nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
      47           1 :                 return -EINVAL;
      48             :         }
      49           2 :         return rc;
      50           3 : }
      51             : 
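A concrete instance of this mapping, with assumed limits: a namespace capped at 256 LBAs per I/O and a qpair with io_queue_requests = 32. A 10,000-LBA request would need ceil(10000 / 256) = 40 child requests, which can never fit in the queue, so a transient -ENOMEM from request allocation is converted into a permanent -EINVAL and the caller knows not to retry.
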
      52             : static inline bool
      53         230 : _nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
      54             : {
      55         265 :         return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
      56          35 :                (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
      57           6 :                (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
      58           6 :                (ns->md_size == 8);
      59             : }
      60             : 
      61             : static inline uint32_t
      62         125 : _nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
      63             : {
      64         250 :         return _nvme_md_excluded_from_xfer(ns, io_flags) ?
      65         125 :                ns->sector_size : ns->extended_lba_size;
      66             : }
      67             : 
      68             : static inline uint32_t
      69         105 : _nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
      70             : {
      71         210 :         return _nvme_md_excluded_from_xfer(ns, io_flags) ?
      72         105 :                ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
      73             : }
      74             : 
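A hedged sketch of the decision these helpers encode: with PRACT set on an extended-LBA, PI-enabled namespace whose metadata is exactly 8 bytes, the controller inserts/strips the protection information itself, so the host buffer is sized by the raw sector size instead of the extended LBA size. Field and macro names below are illustrative stand-ins, not SPDK types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IO_FLAGS_PRACT 0x2000u /* hypothetical stand-in for SPDK_NVME_IO_FLAGS_PRACT */

    struct ns_info {
            uint32_t sector_size;       /* e.g. 512 */
            uint32_t md_size;           /* e.g. 8 */
            uint32_t extended_lba_size; /* sector_size + md_size when metadata is interleaved */
            bool extended_lba;
            bool pi_supported;
    };

    static uint32_t
    host_buffer_sector_size(const struct ns_info *ns, uint32_t io_flags)
    {
            bool md_excluded = (io_flags & IO_FLAGS_PRACT) && ns->extended_lba &&
                               ns->pi_supported && ns->md_size == 8;

            return md_excluded ? ns->sector_size : ns->extended_lba_size;
    }

    int
    main(void)
    {
            struct ns_info ns = { 512, 8, 520, true, true };

            printf("%u\n", host_buffer_sector_size(&ns, IO_FLAGS_PRACT)); /* 512 */
            printf("%u\n", host_buffer_sector_size(&ns, 0));              /* 520 */
            return 0;
    }
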
      75             : static struct nvme_request *
      76          58 : _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
      77             :                         const struct nvme_payload *payload,
      78             :                         uint32_t payload_offset, uint32_t md_offset,
      79             :                         uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
      80             :                         uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
      81             :                         struct nvme_request *parent, bool check_sgl, int *rc)
      82             : {
      83          58 :         struct nvme_request     *child;
      84             : 
      85          58 :         child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
      86          58 :                                 cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
      87          58 :         if (child == NULL) {
      88           1 :                 nvme_request_free_children(parent);
      89           1 :                 nvme_free_request(parent);
      90           1 :                 return NULL;
      91             :         }
      92             : 
      93          57 :         nvme_request_add_child(parent, child);
      94          57 :         return child;
      95          58 : }
      96             : 
      97             : static struct nvme_request *
      98          14 : _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
      99             :                            struct spdk_nvme_qpair *qpair,
     100             :                            const struct nvme_payload *payload,
     101             :                            uint32_t payload_offset, uint32_t md_offset,
     102             :                            uint64_t lba, uint32_t lba_count,
     103             :                            spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     104             :                            uint32_t io_flags, struct nvme_request *req,
     105             :                            uint32_t sectors_per_max_io, uint32_t sector_mask,
     106             :                            uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     107             :                            void *accel_sequence, int *rc)
     108             : {
     109          14 :         uint32_t                sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     110          14 :         uint32_t                remaining_lba_count = lba_count;
     111          14 :         struct nvme_request     *child;
     112             : 
     113          14 :         if (spdk_unlikely(accel_sequence != NULL)) {
     114           0 :                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     115           0 :                 *rc = -EINVAL;
     116           0 :                 return NULL;
     117             :         }
     118             : 
     119          69 :         while (remaining_lba_count > 0) {
     120          56 :                 lba_count = sectors_per_max_io - (lba & sector_mask);
     121          56 :                 lba_count = spdk_min(remaining_lba_count, lba_count);
     122             : 
     123         112 :                 child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     124          56 :                                                 lba, lba_count, cb_fn, cb_arg, opc,
     125          56 :                                                 io_flags, apptag_mask, apptag, cdw13, req, true, rc);
     126          56 :                 if (child == NULL) {
     127           1 :                         return NULL;
     128             :                 }
     129             : 
     130          55 :                 remaining_lba_count -= lba_count;
     131          55 :                 lba += lba_count;
     132          55 :                 payload_offset += lba_count * sector_size;
     133          55 :                 md_offset += lba_count * ns->md_size;
     134             :         }
     135             : 
     136          13 :         return req;
     137          14 : }
     138             : 
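The sector_mask parameter lets one loop serve both split cases: stripe splitting passes sectors_per_stripe - 1 so every later child starts on a stripe boundary, while max-I/O splitting passes 0 so children are simply capped at sectors_per_max_io. A standalone sketch with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t u32_min(uint32_t a, uint32_t b) { return a < b ? a : b; }

    /* Prints the (lba, lba_count) of each child the split loop would produce. */
    static void
    print_split(uint64_t lba, uint32_t lba_count, uint32_t max_io, uint32_t sector_mask)
    {
            while (lba_count > 0) {
                    /* The first child is shortened so later children start on a boundary. */
                    uint32_t child = u32_min(lba_count, max_io - (uint32_t)(lba & sector_mask));

                    printf("child: lba=%llu count=%u\n", (unsigned long long)lba, child);
                    lba += child;
                    lba_count -= child;
            }
    }

    int
    main(void)
    {
            /* Stripe split: mask 127 trims the first child to 28 LBAs (100 -> 128). */
            print_split(100, 300, 128, 128 - 1); /* children: 28, 128, 128, 16 */
            /* Max-I/O split: mask 0 yields plain 128-LBA chunks. */
            print_split(100, 300, 128, 0);       /* children: 128, 128, 44 */
            return 0;
    }
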
     139             : static inline bool
     140         148 : _is_io_flags_valid(uint32_t io_flags)
     141             : {
     142         148 :         if (spdk_unlikely(io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK)) {
     143             :                 /* Invalid io_flags */
     144           3 :                 SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
     145           3 :                 return false;
     146             :         }
     147             : 
     148         145 :         return true;
     149         148 : }
     150             : 
     151             : static inline bool
     152           2 : _is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
     153             : {
      154             :         /* An accel sequence can only be executed if the controller supports accel and the qpair
      155             :          * is part of a poll group. */
     156           2 :         if (spdk_likely(seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
     157             :                                         qpair->poll_group != NULL))) {
     158           2 :                 return true;
     159             :         }
     160             : 
     161           0 :         return false;
     162           2 : }
     163             : 
     164             : static void
     165          88 : _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
     166             :                            uint32_t opc, uint64_t lba, uint32_t lba_count,
     167             :                            uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
     168             :                            uint32_t cdw13)
     169             : {
     170          88 :         struct spdk_nvme_cmd    *cmd;
     171             : 
     172          88 :         assert(_is_io_flags_valid(io_flags));
     173             : 
     174          88 :         cmd = &req->cmd;
     175          88 :         cmd->opc = opc;
     176          88 :         cmd->nsid = ns->id;
     177             : 
     178          88 :         *(uint64_t *)&cmd->cdw10 = lba;
     179             : 
     180          88 :         if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
     181          13 :                 switch (ns->pi_type) {
     182             :                 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
     183             :                 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
     184           1 :                         cmd->cdw14 = (uint32_t)lba;
     185           1 :                         break;
     186             :                 }
     187          13 :         }
     188             : 
     189          88 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
     190             : 
     191          88 :         cmd->cdw12 = lba_count - 1;
     192          88 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
     193             : 
     194          88 :         cmd->cdw13 = cdw13;
     195             : 
     196          88 :         cmd->cdw15 = apptag_mask;
     197          88 :         cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
     198          88 : }
     199             : 
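The packing above follows the NVMe NVM command set layout: the starting LBA spans CDW10-11, CDW12 carries the zero-based NLB in its low bits plus control flags, CDW14 seeds the reference tag for PI types 1 and 2, and CDW15 holds the expected application tag mask and value. A minimal sketch of the same packing with hypothetical values and no SPDK types:

    #include <stdint.h>
    #include <stdio.h>

    struct nvm_cdws {
            uint32_t cdw10, cdw11, cdw12, cdw14, cdw15;
    };

    static void
    pack_rw_cdws(struct nvm_cdws *c, uint64_t lba, uint32_t lba_count,
                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
    {
            c->cdw10 = (uint32_t)lba;               /* starting LBA, low 32 bits */
            c->cdw11 = (uint32_t)(lba >> 32);       /* starting LBA, high 32 bits */
            c->cdw12 = (lba_count - 1) | io_flags;  /* NLB is zero-based */
            c->cdw14 = (uint32_t)lba;               /* initial reference tag, PI type 1/2 only */
            c->cdw15 = ((uint32_t)apptag_mask << 16) | apptag;
    }

    int
    main(void)
    {
            struct nvm_cdws c;

            pack_rw_cdws(&c, 0x100000000ULL, 8, 0, 0xffff, 0x1234);
            printf("cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw15=0x%x\n",
                   c.cdw10, c.cdw11, c.cdw12, c.cdw15);
            /* prints: cdw10=0x0 cdw11=0x1 cdw12=0x7 cdw15=0xffff1234 */
            return 0;
    }
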
     200             : static struct nvme_request *
     201          18 : _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
     202             :                                struct spdk_nvme_qpair *qpair,
     203             :                                const struct nvme_payload *payload,
     204             :                                uint32_t payload_offset, uint32_t md_offset,
     205             :                                uint64_t lba, uint32_t lba_count,
     206             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     207             :                                uint32_t io_flags, struct nvme_request *req,
     208             :                                uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     209             :                                void *accel_sequence, int *rc)
     210             : {
     211          18 :         spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
     212          18 :         spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
     213          18 :         void *sgl_cb_arg = req->payload.contig_or_cb_arg;
     214          18 :         bool start_valid, end_valid, last_sge, child_equals_parent;
     215          18 :         uint64_t child_lba = lba;
     216          18 :         uint32_t req_current_length = 0;
     217          18 :         uint32_t child_length = 0;
     218          18 :         uint32_t sge_length;
     219          18 :         uint32_t page_size = qpair->ctrlr->page_size;
     220          18 :         uintptr_t address;
     221             : 
     222          18 :         reset_sgl_fn(sgl_cb_arg, payload_offset);
     223          18 :         next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     224          36 :         while (req_current_length < req->payload_size) {
     225             : 
     226          19 :                 if (sge_length == 0) {
     227           0 :                         continue;
     228          19 :                 } else if (req_current_length + sge_length > req->payload_size) {
     229           5 :                         sge_length = req->payload_size - req_current_length;
     230           5 :                 }
     231             : 
     232             :                 /*
     233             :                  * The start of the SGE is invalid if the start address is not page aligned,
     234             :                  *  unless it is the first SGE in the child request.
     235             :                  */
     236          19 :                 start_valid = child_length == 0 || _is_page_aligned(address, page_size);
     237             : 
     238             :                 /* Boolean for whether this is the last SGE in the parent request. */
     239          19 :                 last_sge = (req_current_length + sge_length == req->payload_size);
     240             : 
     241             :                 /*
     242             :                  * The end of the SGE is invalid if the end address is not page aligned,
     243             :                  *  unless it is the last SGE in the parent request.
     244             :                  */
     245          19 :                 end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
     246             : 
     247             :                 /*
     248             :                  * This child request equals the parent request, meaning that no splitting
     249             :                  *  was required for the parent request (the one passed into this function).
     250             :                  *  In this case, we do not create a child request at all - we just send
     251             :                  *  the original request as a single request at the end of this function.
     252             :                  */
     253          19 :                 child_equals_parent = (child_length + sge_length == req->payload_size);
     254             : 
     255          19 :                 if (start_valid) {
     256             :                         /*
     257             :                          * The start of the SGE is valid, so advance the length parameters,
     258             :                          *  to include this SGE with previous SGEs for this child request
     259             :                          *  (if any).  If it is not valid, we do not advance the length
     260             :                          *  parameters nor get the next SGE, because we must send what has
     261             :                          *  been collected before this SGE as a child request.
     262             :                          */
     263          19 :                         child_length += sge_length;
     264          19 :                         req_current_length += sge_length;
     265          19 :                         if (req_current_length < req->payload_size) {
     266           2 :                                 next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     267             :                                 /*
     268             :                                  * If the next SGE is not page aligned, we will need to create a
     269             :                                  *  child request for what we have so far, and then start a new
     270             :                                  *  child request for the next SGE.
     271             :                                  */
     272           2 :                                 start_valid = _is_page_aligned(address, page_size);
     273           2 :                         }
     274          19 :                 }
     275             : 
     276          19 :                 if (start_valid && end_valid && !last_sge) {
     277           1 :                         continue;
     278             :                 }
     279             : 
     280             :                 /*
     281             :                  * We need to create a split here.  Send what we have accumulated so far as a child
     282             :                  *  request.  Checking if child_equals_parent allows us to *not* create a child request
     283             :                  *  when no splitting is required - in that case we will fall-through and just create
     284             :                  *  a single request with no children for the entire I/O.
     285             :                  */
     286          18 :                 if (!child_equals_parent) {
     287           1 :                         struct nvme_request *child;
     288           1 :                         uint32_t child_lba_count;
     289             : 
     290           1 :                         if ((child_length % ns->extended_lba_size) != 0) {
     291           1 :                                 SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
     292             :                                             child_length, ns->extended_lba_size);
     293           1 :                                 *rc = -EINVAL;
     294           1 :                                 return NULL;
     295             :                         }
     296           0 :                         if (spdk_unlikely(accel_sequence != NULL)) {
     297           0 :                                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     298           0 :                                 *rc = -EINVAL;
     299           0 :                                 return NULL;
     300             :                         }
     301             : 
     302           0 :                         child_lba_count = child_length / ns->extended_lba_size;
     303             :                         /*
     304             :                          * Note the last parameter is set to "false" - this tells the recursive
     305             :                          *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
     306             :                          *  since we have already verified it here.
     307             :                          */
     308           0 :                         child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     309           0 :                                                         child_lba, child_lba_count,
     310           0 :                                                         cb_fn, cb_arg, opc, io_flags,
     311           0 :                                                         apptag_mask, apptag, cdw13, req, false, rc);
     312           0 :                         if (child == NULL) {
     313           0 :                                 return NULL;
     314             :                         }
     315           0 :                         payload_offset += child_length;
     316           0 :                         md_offset += child_lba_count * ns->md_size;
     317           0 :                         child_lba += child_lba_count;
     318           0 :                         child_length = 0;
     319           1 :                 }
     320             :         }
     321             : 
     322          17 :         if (child_length == req->payload_size) {
     323             :                 /* No splitting was required, so setup the whole payload as one request. */
     324          17 :                 _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     325          17 :         }
     326             : 
     327          17 :         return req;
     328          18 : }
     329             : 
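The rule being enforced here comes from PRP lists: every PRP entry except the first must start on a page boundary, so an SGE can only be merged into the current child if its start is aligned (or it opens the child) and its end is aligned (or it ends the parent). A small sketch of the two tests under an assumed 4 KiB page:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u /* assumed controller memory page size */

    static bool is_page_aligned(uintptr_t addr) { return (addr & (PAGE_SIZE - 1)) == 0; }

    int
    main(void)
    {
            /* An SGE covering [0x11000, 0x11800): starts on a page, ends mid-page. */
            uintptr_t addr = 0x11000;
            uint32_t len = 0x800;
            bool first_in_child = true, last_in_parent = false;

            bool start_valid = first_in_child || is_page_aligned(addr);
            bool end_valid = last_in_parent || is_page_aligned(addr + len);

            /* end_valid is 0: what has accumulated must be sent as a child here,
             * unless this SGE happens to be the last one in the parent request. */
            printf("start_valid=%d end_valid=%d\n", start_valid, end_valid); /* 1 0 */
            return 0;
    }
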
     330             : static struct nvme_request *
     331           3 : _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
     332             :                                struct spdk_nvme_qpair *qpair,
     333             :                                const struct nvme_payload *payload,
     334             :                                uint32_t payload_offset, uint32_t md_offset,
     335             :                                uint64_t lba, uint32_t lba_count,
     336             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     337             :                                uint32_t io_flags, struct nvme_request *req,
     338             :                                uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     339             :                                void *accel_sequence, int *rc)
     340             : {
     341           3 :         spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
     342           3 :         spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
     343           3 :         void *sgl_cb_arg = req->payload.contig_or_cb_arg;
     344           3 :         uint64_t child_lba = lba;
     345           3 :         uint32_t req_current_length = 0;
     346           3 :         uint32_t accumulated_length = 0;
     347           3 :         uint32_t sge_length;
     348           3 :         uint16_t max_sges, num_sges;
     349           3 :         uintptr_t address;
     350             : 
     351           3 :         max_sges = ns->ctrlr->max_sges;
     352             : 
     353           3 :         reset_sgl_fn(sgl_cb_arg, payload_offset);
     354           3 :         num_sges = 0;
     355             : 
     356           7 :         while (req_current_length < req->payload_size) {
     357           5 :                 next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     358             : 
     359           5 :                 if (req_current_length + sge_length > req->payload_size) {
     360           0 :                         sge_length = req->payload_size - req_current_length;
     361           0 :                 }
     362             : 
     363           5 :                 accumulated_length += sge_length;
     364           5 :                 req_current_length += sge_length;
     365           5 :                 num_sges++;
     366             : 
     367           5 :                 if (num_sges < max_sges && req_current_length < req->payload_size) {
     368           1 :                         continue;
     369             :                 }
     370             : 
     371             :                 /*
     372             :                  * We need to create a split here.  Send what we have accumulated so far as a child
     373             :                  *  request.  Checking if the child equals the full payload allows us to *not*
     374             :                  *  create a child request when no splitting is required - in that case we will
     375             :                  *  fall-through and just create a single request with no children for the entire I/O.
     376             :                  */
     377           4 :                 if (accumulated_length != req->payload_size) {
     378           3 :                         struct nvme_request *child;
     379           3 :                         uint32_t child_lba_count;
     380           3 :                         uint32_t child_length;
     381           3 :                         uint32_t extra_length;
     382             : 
     383           3 :                         child_length = accumulated_length;
     384             :                         /* Child length may not be a multiple of the block size! */
     385           3 :                         child_lba_count = child_length / ns->extended_lba_size;
     386           3 :                         extra_length = child_length - (child_lba_count * ns->extended_lba_size);
     387           3 :                         if (extra_length != 0) {
     388             :                                 /* The last SGE does not end on a block boundary. We need to cut it off. */
     389           2 :                                 if (extra_length >= child_length) {
     390           1 :                                         SPDK_ERRLOG("Unable to send I/O. Would require more than the supported number of "
     391             :                                                     "SGL Elements.");
     392           1 :                                         *rc = -EINVAL;
     393           1 :                                         return NULL;
     394             :                                 }
     395           1 :                                 child_length -= extra_length;
     396           1 :                         }
     397             : 
     398           2 :                         if (spdk_unlikely(accel_sequence != NULL)) {
     399           0 :                                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     400           0 :                                 *rc = -EINVAL;
     401           0 :                                 return NULL;
     402             :                         }
     403             : 
     404             :                         /*
     405             :                          * Note the last parameter is set to "false" - this tells the recursive
     406             :                          *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
     407             :                          *  since we have already verified it here.
     408             :                          */
     409           4 :                         child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     410           2 :                                                         child_lba, child_lba_count,
     411           2 :                                                         cb_fn, cb_arg, opc, io_flags,
     412           2 :                                                         apptag_mask, apptag, cdw13, req, false, rc);
     413           2 :                         if (child == NULL) {
     414           0 :                                 return NULL;
     415             :                         }
     416           2 :                         payload_offset += child_length;
     417           2 :                         md_offset += child_lba_count * ns->md_size;
     418           2 :                         child_lba += child_lba_count;
     419           2 :                         accumulated_length -= child_length;
     420           2 :                         num_sges = accumulated_length > 0;
     421           3 :                 }
     422             :         }
     423             : 
     424           2 :         if (accumulated_length == req->payload_size) {
     425             :                 /* No splitting was required, so setup the whole payload as one request. */
     426           1 :                 _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     427           1 :         }
     428             : 
     429           2 :         return req;
     430           3 : }
     431             : 
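A worked sketch of the trim step above, with assumed sizes: when the accumulated SGEs do not end on a block boundary, the child is rounded down to whole blocks and the cut-off tail is carried into the next child (which is why num_sges is reset to 1 when bytes remain):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t block_size = 512;                   /* assumed extended LBA size */
            uint32_t accumulated_length = 3 * 512 + 200; /* three blocks plus a partial tail */

            uint32_t child_lba_count = accumulated_length / block_size;                /* 3 */
            uint32_t extra_length = accumulated_length - child_lba_count * block_size; /* 200 */

            /* The 200-byte tail does not end on a block boundary, so the child is
             * trimmed to three full blocks and the tail is re-counted for the next child. */
            printf("child blocks=%u, trimmed tail=%u bytes\n", child_lba_count, extra_length);
            return 0;
    }
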
     432             : static inline struct nvme_request *
     433         105 : _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     434             :                 const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
     435             :                 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     436             :                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
     437             :                 void *accel_sequence, int *rc)
     438             : {
     439         105 :         struct nvme_request     *req;
     440         105 :         uint32_t                sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     441         105 :         uint32_t                sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
     442         105 :         uint32_t                sectors_per_stripe = ns->sectors_per_stripe;
     443             : 
     444         105 :         assert(rc != NULL);
     445         105 :         assert(*rc == 0);
     446             : 
     447         105 :         req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
     448         105 :                                     cb_fn, cb_arg);
     449         105 :         if (req == NULL) {
     450           1 :                 *rc = -ENOMEM;
     451           1 :                 return NULL;
     452             :         }
     453             : 
     454         104 :         req->payload_offset = payload_offset;
     455         104 :         req->md_offset = md_offset;
     456         104 :         req->accel_sequence = accel_sequence;
     457             : 
     458             :         /* Zone append commands cannot be split. */
     459         104 :         if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
     460           3 :                 assert(ns->csi == SPDK_NVME_CSI_ZNS);
     461             :                 /*
      462             :                  * Because driver-assisted striping is disabled for zone append commands,
      463             :                  * _nvme_ns_cmd_rw() should never cause a valid request to be split.
      464             :                  * If a request is nevertheless split, error handling is done in the callers.
     465             :                  */
     466           3 :                 sectors_per_stripe = 0;
     467           3 :         }
     468             : 
     469             :         /*
     470             :          * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
      471             :          * If the controller defines a stripe size and this I/O spans a stripe
      472             :          *  boundary, split the request into multiple requests and submit each
      473             :          *  separately to the hardware.
     474             :          */
     475         104 :         if (sectors_per_stripe > 0 &&
     476           7 :             (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
     477           2 :                 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
     478           1 :                                                   cb_fn,
     479           1 :                                                   cb_arg, opc,
     480           1 :                                                   io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
      481           1 :                                                   apptag_mask, apptag, cdw13, accel_sequence, rc);
     482         103 :         } else if (lba_count > sectors_per_max_io) {
     483          26 :                 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
     484          13 :                                                   cb_fn,
     485          13 :                                                   cb_arg, opc,
     486          13 :                                                   io_flags, req, sectors_per_max_io, 0, apptag_mask,
     487          13 :                                                   apptag, cdw13, accel_sequence, rc);
     488          90 :         } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
     489          21 :                 if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
     490           6 :                         return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
     491           3 :                                                               lba, lba_count, cb_fn, cb_arg, opc, io_flags,
     492           3 :                                                               req, apptag_mask, apptag, cdw13,
     493           3 :                                                               accel_sequence, rc);
     494             :                 } else {
     495          36 :                         return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
     496          18 :                                                               lba, lba_count, cb_fn, cb_arg, opc, io_flags,
     497          18 :                                                               req, apptag_mask, apptag, cdw13,
     498          18 :                                                               accel_sequence, rc);
     499             :                 }
     500             :         }
     501             : 
     502          69 :         _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     503          69 :         return req;
     504         105 : }
     505             : 
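The stripe-crossing test relies on sectors_per_stripe being a power of two: lba & (sectors_per_stripe - 1) is the offset inside the current stripe, and the I/O crosses a boundary exactly when that offset plus lba_count exceeds the stripe size. A quick standalone check with assumed numbers:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t sectors_per_stripe = 128; /* must be a power of two */
            uint64_t lba = 120;
            uint32_t lba_count = 16;

            /* Offset 120 within the stripe; 120 + 16 = 136 > 128, so it crosses. */
            int crosses = ((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe;

            printf("crosses=%d\n", crosses); /* 1 */
            return 0;
    }
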
     506             : int
     507           1 : spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     508             :                          uint64_t lba,
     509             :                          uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     510             :                          uint32_t io_flags)
     511             : {
     512           1 :         struct nvme_request *req;
     513           1 :         struct nvme_payload payload;
     514           1 :         int rc = 0;
     515             : 
     516           1 :         if (!_is_io_flags_valid(io_flags)) {
     517           0 :                 return -EINVAL;
     518             :         }
     519             : 
     520           1 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     521             : 
     522           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     523             :                               SPDK_NVME_OPC_COMPARE,
     524           1 :                               io_flags, 0,
     525             :                               0, 0, false, NULL, &rc);
     526           1 :         if (req != NULL) {
     527           1 :                 return nvme_qpair_submit_request(qpair, req);
     528             :         } else {
     529           0 :                 return nvme_ns_map_failure_rc(lba_count,
     530           0 :                                               ns->sectors_per_max_io,
     531           0 :                                               ns->sectors_per_stripe,
     532           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     533           0 :                                               rc);
     534             :         }
     535           1 : }
     536             : 
     537             : int
     538           6 : spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     539             :                                  void *buffer,
     540             :                                  void *metadata,
     541             :                                  uint64_t lba,
     542             :                                  uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     543             :                                  uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     544             : {
     545           6 :         struct nvme_request *req;
     546           6 :         struct nvme_payload payload;
     547           6 :         int rc = 0;
     548             : 
     549           6 :         if (!_is_io_flags_valid(io_flags)) {
     550           0 :                 return -EINVAL;
     551             :         }
     552             : 
     553           6 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     554             : 
     555          12 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     556             :                               SPDK_NVME_OPC_COMPARE,
     557           6 :                               io_flags,
     558           6 :                               apptag_mask, apptag, 0, false, NULL, &rc);
     559           6 :         if (req != NULL) {
     560           6 :                 return nvme_qpair_submit_request(qpair, req);
     561             :         } else {
     562           0 :                 return nvme_ns_map_failure_rc(lba_count,
     563           0 :                                               ns->sectors_per_max_io,
     564           0 :                                               ns->sectors_per_stripe,
     565           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     566           0 :                                               rc);
     567             :         }
     568           6 : }
     569             : 
     570             : int
     571           2 : spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     572             :                           uint64_t lba, uint32_t lba_count,
     573             :                           spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     574             :                           spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     575             :                           spdk_nvme_req_next_sge_cb next_sge_fn)
     576             : {
     577           2 :         struct nvme_request *req;
     578           2 :         struct nvme_payload payload;
     579           2 :         int rc = 0;
     580             : 
     581           2 :         if (!_is_io_flags_valid(io_flags)) {
     582           0 :                 return -EINVAL;
     583             :         }
     584             : 
     585           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     586           1 :                 return -EINVAL;
     587             :         }
     588             : 
     589           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     590             : 
     591           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     592             :                               SPDK_NVME_OPC_COMPARE,
     593           1 :                               io_flags, 0, 0, 0, true, NULL, &rc);
     594           1 :         if (req != NULL) {
     595           1 :                 return nvme_qpair_submit_request(qpair, req);
     596             :         } else {
     597           0 :                 return nvme_ns_map_failure_rc(lba_count,
     598           0 :                                               ns->sectors_per_max_io,
     599           0 :                                               ns->sectors_per_stripe,
     600           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     601           0 :                                               rc);
     602             :         }
     603           2 : }
     604             : 
     605             : int
     606           6 : spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     607             :                                   uint64_t lba, uint32_t lba_count,
     608             :                                   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     609             :                                   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     610             :                                   spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     611             :                                   uint16_t apptag_mask, uint16_t apptag)
     612             : {
     613           6 :         struct nvme_request *req;
     614           6 :         struct nvme_payload payload;
     615           6 :         int rc = 0;
     616             : 
     617           6 :         if (!_is_io_flags_valid(io_flags)) {
     618           0 :                 return -EINVAL;
     619             :         }
     620             : 
     621           6 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     622           0 :                 return -EINVAL;
     623             :         }
     624             : 
     625           6 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
     626             : 
     627          12 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     628           6 :                               SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
     629             :                               NULL, &rc);
     630           6 :         if (req != NULL) {
     631           6 :                 return nvme_qpair_submit_request(qpair, req);
     632             :         } else {
     633           0 :                 return nvme_ns_map_failure_rc(lba_count,
     634           0 :                                               ns->sectors_per_max_io,
     635           0 :                                               ns->sectors_per_stripe,
     636           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     637           0 :                                               rc);
     638             :         }
     639           6 : }
     640             : 
     641             : int
     642          10 : spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     643             :                       uint64_t lba,
     644             :                       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     645             :                       uint32_t io_flags)
     646             : {
     647          10 :         struct nvme_request *req;
     648          10 :         struct nvme_payload payload;
     649          10 :         int rc = 0;
     650             : 
     651          10 :         if (!_is_io_flags_valid(io_flags)) {
     652           0 :                 return -EINVAL;
     653             :         }
     654             : 
     655          10 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     656             : 
     657          20 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     658          10 :                               io_flags, 0,
     659             :                               0, 0, false, NULL, &rc);
     660          10 :         if (req != NULL) {
     661           9 :                 return nvme_qpair_submit_request(qpair, req);
     662             :         } else {
     663           2 :                 return nvme_ns_map_failure_rc(lba_count,
     664           1 :                                               ns->sectors_per_max_io,
     665           1 :                                               ns->sectors_per_stripe,
     666           1 :                                               qpair->ctrlr->opts.io_queue_requests,
     667           1 :                                               rc);
     668             :         }
     669          10 : }
     670             : 
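A hedged caller-side sketch of the API above, not taken from this file: it assumes ns and qpair came from the usual probe/attach and spdk_nvme_ctrlr_alloc_io_qpair() flow, and that buf is a DMA-able buffer (e.g. from spdk_zmalloc()) large enough for 8 sectors; error handling is minimal.

    #include <stdbool.h>
    #include <stdio.h>

    #include "spdk/nvme.h"

    /* Hypothetical completion callback: flags a caller-owned bool when done. */
    static void
    read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
    {
            if (spdk_nvme_cpl_is_error(cpl)) {
                    fprintf(stderr, "read failed\n");
            }
            *(bool *)cb_arg = true;
    }

    static int
    submit_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buf)
    {
            bool done = false;
            /* Read 8 LBAs starting at LBA 0, no special I/O flags. */
            int rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, 0, 8, read_done, &done, 0);

            if (rc != 0) {
                    /* -EINVAL may mean the split would exceed the queue depth,
                     * per nvme_ns_map_failure_rc() above. */
                    return rc;
            }

            while (!done) {
                    spdk_nvme_qpair_process_completions(qpair, 0);
            }
            return 0;
    }
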
     671             : int
     672           1 : spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     673             :                               void *metadata,
     674             :                               uint64_t lba,
     675             :                               uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     676             :                               uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     677             : {
     678           1 :         struct nvme_request *req;
     679           1 :         struct nvme_payload payload;
     680           1 :         int rc = 0;
     681             : 
     682           1 :         if (!_is_io_flags_valid(io_flags)) {
     683           0 :                 return -EINVAL;
     684             :         }
     685             : 
     686           1 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     687             : 
     688           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     689           1 :                               io_flags,
     690           1 :                               apptag_mask, apptag, 0, false, NULL, &rc);
     691           1 :         if (req != NULL) {
     692           1 :                 return nvme_qpair_submit_request(qpair, req);
     693             :         } else {
     694           0 :                 return nvme_ns_map_failure_rc(lba_count,
     695           0 :                                               ns->sectors_per_max_io,
     696           0 :                                               ns->sectors_per_stripe,
     697           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     698           0 :                                               rc);
     699             :         }
     700           1 : }
     701             : 
     702             : static int
     703           0 : nvme_ns_cmd_rw_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     704             :                    uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     705             :                    struct spdk_nvme_ns_cmd_ext_io_opts *opts, enum spdk_nvme_nvm_opcode opc)
     706             : {
     707           0 :         struct nvme_request *req;
     708           0 :         struct nvme_payload payload;
     709           0 :         void *seq;
     710           0 :         int rc = 0;
     711             : 
     712           0 :         assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
     713           0 :         assert(opts);
     714             : 
     715           0 :         payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);
     716             : 
     717           0 :         if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
     718           0 :                 return -EINVAL;
     719             :         }
     720             : 
     721           0 :         seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
     722           0 :         if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
     723           0 :                 return -EINVAL;
     724             :         }
     725             : 
     726           0 :         payload.opts = opts;
     727             : 
     728           0 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
     729           0 :                               opts->apptag_mask, opts->apptag, 0, false, seq, &rc);
     730           0 :         if (spdk_unlikely(req == NULL)) {
     731           0 :                 return nvme_ns_map_failure_rc(lba_count,
     732           0 :                                               ns->sectors_per_max_io,
     733           0 :                                               ns->sectors_per_stripe,
     734           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     735           0 :                                               rc);
     736             :         }
     737             : 
     738           0 :         return nvme_qpair_submit_request(qpair, req);
     739           0 : }
     740             : 
     741             : int
     742           0 : spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     743             :                           uint64_t lba,
     744             :                           uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     745             :                           struct spdk_nvme_ns_cmd_ext_io_opts *opts)
     746             : {
     747           0 :         return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
     748             :                                   SPDK_NVME_OPC_READ);
     749             : }
     750             : 
     751             : int
     752           6 : spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     753             :                        uint64_t lba, uint32_t lba_count,
     754             :                        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     755             :                        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     756             :                        spdk_nvme_req_next_sge_cb next_sge_fn)
     757             : {
     758           6 :         struct nvme_request *req;
     759           6 :         struct nvme_payload payload;
     760           6 :         int rc = 0;
     761             : 
     762           6 :         if (!_is_io_flags_valid(io_flags)) {
     763           0 :                 return -EINVAL;
     764             :         }
     765             : 
     766           6 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     767           2 :                 return -EINVAL;
     768             :         }
     769             : 
     770           4 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     771             : 
     772           8 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     773           4 :                               io_flags, 0, 0, 0, true, NULL, &rc);
     774           4 :         if (req != NULL) {
     775           3 :                 return nvme_qpair_submit_request(qpair, req);
     776             :         } else {
     777           2 :                 return nvme_ns_map_failure_rc(lba_count,
     778           1 :                                               ns->sectors_per_max_io,
     779           1 :                                               ns->sectors_per_stripe,
     780           1 :                                               qpair->ctrlr->opts.io_queue_requests,
     781           1 :                                               rc);
     782             :         }
     783           6 : }
     784             : 
     785             : int
     786           2 : spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     787             :                                uint64_t lba, uint32_t lba_count,
     788             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     789             :                                spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     790             :                                spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     791             :                                uint16_t apptag_mask, uint16_t apptag)
     792             : {
     793           2 :         struct nvme_request *req;
     794           2 :         struct nvme_payload payload;
     795           2 :         int rc = 0;
     796             : 
     797           2 :         if (!_is_io_flags_valid(io_flags)) {
     798           0 :                 return -EINVAL;
     799             :         }
     800             : 
     801           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     802           1 :                 return -EINVAL;
     803             :         }
     804             : 
     805           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
     806             : 
     807           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     808           1 :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
     809           1 :         if (req != NULL) {
     810           1 :                 return nvme_qpair_submit_request(qpair, req);
     811             :         } else {
     812           0 :                 return nvme_ns_map_failure_rc(lba_count,
     813           0 :                                               ns->sectors_per_max_io,
     814           0 :                                               ns->sectors_per_stripe,
     815           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     816           0 :                                               rc);
     817             :         }
     818           2 : }
     819             : 
     820             : static int
     821           8 : nvme_ns_cmd_rwv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
     822             :                     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     823             :                     spdk_nvme_req_next_sge_cb next_sge_fn, struct spdk_nvme_ns_cmd_ext_io_opts *opts,
     824             :                     enum spdk_nvme_nvm_opcode opc)
     825             : {
     826           8 :         struct nvme_request *req;
     827           8 :         struct nvme_payload payload;
     828           8 :         void *seq;
     829           8 :         int rc = 0;
     830             : 
     831           8 :         assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
     832             : 
     833           8 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     834           4 :                 return -EINVAL;
     835             :         }
     836             : 
     837           4 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     838             : 
     839           4 :         if (opts) {
     840           4 :                 if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
     841           2 :                         return -EINVAL;
     842             :                 }
     843             : 
     844           2 :                 seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
     845           2 :                 if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
     846           0 :                         return -EINVAL;
     847             :                 }
     848             : 
     849           2 :                 payload.opts = opts;
     850           2 :                 payload.md = opts->metadata;
     851           4 :                 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
     852           2 :                                       opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);
     853             : 
     854           2 :         } else {
     855           0 :                 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
     856             :                                       true, NULL, &rc);
     857             :         }
     858             : 
     859           2 :         if (req == NULL) {
     860           0 :                 return nvme_ns_map_failure_rc(lba_count,
     861           0 :                                               ns->sectors_per_max_io,
     862           0 :                                               ns->sectors_per_stripe,
     863           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     864           0 :                                               rc);
     865             :         }
     866             : 
     867           2 :         return nvme_qpair_submit_request(qpair, req);
     868           8 : }
     869             : 
     870             : int
     871           4 : spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     872             :                            uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
     873             :                            void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     874             :                            spdk_nvme_req_next_sge_cb next_sge_fn,
     875             :                            struct spdk_nvme_ns_cmd_ext_io_opts *opts)
     876             : {
     877           8 :         return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
     878           4 :                                    opts, SPDK_NVME_OPC_READ);
     879             : }
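
/*
 * Caller-side sketch (hypothetical; reuses the example_* helpers sketched
 * above, SPDK_SIZEOF from spdk/util.h): the _ext variants take their per-I/O
 * options through spdk_nvme_ns_cmd_ext_io_opts. The size member must be set
 * so that nvme_ns_cmd_get_ext_io_opt() can tell which trailing fields the
 * caller's copy of the struct actually contains.
 */
static int
example_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		  uint64_t lba, uint32_t lba_count, void *md_buf,
		  spdk_nvme_cmd_cb cb_fn, struct example_sgl_ctx *ctx)
{
	struct spdk_nvme_ns_cmd_ext_io_opts opts = {
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.io_flags = 0,
		.metadata = md_buf,
	};

	return spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count, cb_fn, ctx,
					  example_reset_sgl, example_next_sge,
					  &opts);
}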
     880             : 
     881             : int
     882           3 : spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     883             :                        void *buffer, uint64_t lba,
     884             :                        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     885             :                        uint32_t io_flags)
     886             : {
     887           3 :         struct nvme_request *req;
     888           3 :         struct nvme_payload payload;
     889           3 :         int rc = 0;
     890             : 
     891           3 :         if (!_is_io_flags_valid(io_flags)) {
     892           1 :                 return -EINVAL;
     893             :         }
     894             : 
     895           2 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     896             : 
     897           4 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
     898           2 :                               io_flags, 0, 0, 0, false, NULL, &rc);
     899           2 :         if (req != NULL) {
     900           2 :                 return nvme_qpair_submit_request(qpair, req);
     901             :         } else {
     902           0 :                 return nvme_ns_map_failure_rc(lba_count,
     903           0 :                                               ns->sectors_per_max_io,
     904           0 :                                               ns->sectors_per_stripe,
     905           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     906           0 :                                               rc);
     907             :         }
     908           3 : }
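
/*
 * Caller-side sketch (hypothetical): the contiguous variants expect a single
 * DMA-safe buffer, e.g. one obtained from spdk_zmalloc(). The buffer must
 * remain valid until the completion callback runs.
 */
static int
example_write_one_block(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			uint64_t lba, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	void *buf = spdk_zmalloc(spdk_nvme_ns_get_sector_size(ns), 0x1000, NULL,
				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);

	if (buf == NULL) {
		return -ENOMEM;
	}
	return spdk_nvme_ns_cmd_write(ns, qpair, buf, lba, 1, cb_fn, cb_arg, 0);
}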
     909             : 
     910             : static int
     911           6 : nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
     912             : {
     913           6 :         uint32_t sector_size;
     914             : 
     915             :         /* Not all NVMe Zoned Namespaces support the zone append command. */
     916           6 :         if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
     917           0 :                 return -EINVAL;
     918             :         }
     919             : 
      920           6 :         sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     921             : 
     922             :         /* Fail a too large zone append command early. */
      923           6 :         if ((uint64_t)lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
     924           3 :                 return -EINVAL;
     925             :         }
     926             : 
     927           3 :         return 0;
     928           6 : }
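
/*
 * Worked example (hypothetical values): with a 4096-byte host buffer sector
 * size and max_zone_append_size of 1 MiB (derived from the controller's
 * ZASL), any zone append longer than 256 blocks fails here with -EINVAL
 * before a request is ever allocated.
 */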
     929             : 
     930             : int
     931           4 : nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     932             :                                 void *buffer, void *metadata, uint64_t zslba,
     933             :                                 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     934             :                                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     935             : {
     936           4 :         struct nvme_request *req;
     937           4 :         struct nvme_payload payload;
     938           4 :         int rc = 0;
     939             : 
     940           4 :         if (!_is_io_flags_valid(io_flags)) {
     941           0 :                 return -EINVAL;
     942             :         }
     943             : 
     944           4 :         rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
     945           4 :         if (rc) {
     946           2 :                 return rc;
     947             :         }
     948             : 
     949           2 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     950             : 
     951           4 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
     952             :                               SPDK_NVME_OPC_ZONE_APPEND,
     953           2 :                               io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
     954           2 :         if (req != NULL) {
     955             :                 /*
     956             :                  * Zone append commands cannot be split (num_children has to be 0).
     957             :                  * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
      958             :                  * to happen, because a too large request would already have been failed by
      959             :                  * nvme_ns_cmd_check_zone_append(), given that zasl <= mdts.
     960             :                  */
     961           2 :                 assert(req->num_children == 0);
     962           2 :                 if (req->num_children) {
     963           0 :                         nvme_request_free_children(req);
     964           0 :                         nvme_free_request(req);
     965           0 :                         return -EINVAL;
     966             :                 }
     967           2 :                 return nvme_qpair_submit_request(qpair, req);
     968             :         } else {
     969           0 :                 return nvme_ns_map_failure_rc(lba_count,
     970           0 :                                               ns->sectors_per_max_io,
     971           0 :                                               ns->sectors_per_stripe,
     972           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     973           0 :                                               rc);
     974             :         }
     975           4 : }
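
/*
 * Caller-side sketch (hypothetical; assumes a recent spdk_nvme_cpl layout
 * with cdw0/cdw1): on success, a zone append completion carries the LBA the
 * data actually landed on in dwords 0-1, which is how the caller learns the
 * write pointer position it was assigned.
 */
static void
example_zone_append_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	uint64_t written_lba;

	if (spdk_nvme_cpl_is_error(cpl)) {
		return;	/* a real callback would handle the error */
	}
	written_lba = ((uint64_t)cpl->cdw1 << 32) | cpl->cdw0;
	(void)written_lba;	/* record it, e.g. in the cb_arg context */
}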
     976             : 
     977             : int
     978           2 : nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     979             :                                  uint64_t zslba, uint32_t lba_count,
     980             :                                  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     981             :                                  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     982             :                                  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     983             :                                  uint16_t apptag_mask, uint16_t apptag)
     984             : {
     985           2 :         struct nvme_request *req;
     986           2 :         struct nvme_payload payload;
     987           2 :         int rc = 0;
     988             : 
     989           2 :         if (!_is_io_flags_valid(io_flags)) {
     990           0 :                 return -EINVAL;
     991             :         }
     992             : 
     993           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     994           0 :                 return -EINVAL;
     995             :         }
     996             : 
     997           2 :         rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
     998           2 :         if (rc) {
     999           1 :                 return rc;
    1000             :         }
    1001             : 
    1002           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
    1003             : 
    1004           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
    1005             :                               SPDK_NVME_OPC_ZONE_APPEND,
    1006           1 :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    1007           1 :         if (req != NULL) {
    1008             :                 /*
    1009             :                  * Zone append commands cannot be split (num_children has to be 0).
    1010             :                  * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
    1011             :                  * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
     1012             :                  * do not always cause a request to be split. These functions verify the payload
     1013             :                  * size, verify that the number of SGEs stays below max_sge, and verify the SGE
     1014             :                  * alignment rules (in the case of PRPs). If any of these checks fail, they split
     1015             :                  * the request. A split is very unlikely here, since the size was already verified
     1016             :                  * by nvme_ns_cmd_check_zone_append(); however, these functions still need to be
     1017             :                  * called to perform the checks. If they do cause a split, we return an error
     1018             :                  * here. A well-formed request will never be split.
    1019             :                  */
    1020           1 :                 if (req->num_children) {
    1021           0 :                         nvme_request_free_children(req);
    1022           0 :                         nvme_free_request(req);
    1023           0 :                         return -EINVAL;
    1024             :                 }
    1025           1 :                 return nvme_qpair_submit_request(qpair, req);
    1026             :         } else {
    1027           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1028           0 :                                               ns->sectors_per_max_io,
    1029           0 :                                               ns->sectors_per_stripe,
    1030           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1031           0 :                                               rc);
    1032             :         }
    1033           2 : }
    1034             : 
    1035             : int
    1036           7 : spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1037             :                                void *buffer, void *metadata, uint64_t lba,
    1038             :                                uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1039             :                                uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
    1040             : {
    1041           7 :         struct nvme_request *req;
    1042           7 :         struct nvme_payload payload;
    1043           7 :         int rc = 0;
    1044             : 
    1045           7 :         if (!_is_io_flags_valid(io_flags)) {
    1046           0 :                 return -EINVAL;
    1047             :         }
    1048             : 
    1049           7 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
    1050             : 
    1051          14 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1052           7 :                               io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
    1053           7 :         if (req != NULL) {
    1054           7 :                 return nvme_qpair_submit_request(qpair, req);
    1055             :         } else {
    1056           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1057           0 :                                               ns->sectors_per_max_io,
    1058           0 :                                               ns->sectors_per_stripe,
    1059           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1060           0 :                                               rc);
    1061             :         }
    1062           7 : }
    1063             : 
    1064             : int
    1065           0 : spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1066             :                            void *buffer, uint64_t lba,
    1067             :                            uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1068             :                            struct spdk_nvme_ns_cmd_ext_io_opts *opts)
    1069             : {
    1070           0 :         return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
    1071             :                                   SPDK_NVME_OPC_WRITE);
    1072             : }
    1073             : 
    1074             : int
    1075           4 : spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1076             :                         uint64_t lba, uint32_t lba_count,
    1077             :                         spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
    1078             :                         spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1079             :                         spdk_nvme_req_next_sge_cb next_sge_fn)
    1080             : {
    1081           4 :         struct nvme_request *req;
    1082           4 :         struct nvme_payload payload;
    1083           4 :         int rc = 0;
    1084             : 
    1085           4 :         if (!_is_io_flags_valid(io_flags)) {
    1086           0 :                 return -EINVAL;
    1087             :         }
    1088             : 
    1089           4 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
    1090           1 :                 return -EINVAL;
    1091             :         }
    1092             : 
    1093           3 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
    1094             : 
    1095           6 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1096           3 :                               io_flags, 0, 0, 0, true, NULL, &rc);
    1097           3 :         if (req != NULL) {
    1098           2 :                 return nvme_qpair_submit_request(qpair, req);
    1099             :         } else {
    1100           2 :                 return nvme_ns_map_failure_rc(lba_count,
    1101           1 :                                               ns->sectors_per_max_io,
    1102           1 :                                               ns->sectors_per_stripe,
    1103           1 :                                               qpair->ctrlr->opts.io_queue_requests,
    1104           1 :                                               rc);
    1105             :         }
    1106           4 : }
    1107             : 
    1108             : int
    1109           0 : spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1110             :                                 uint64_t lba, uint32_t lba_count,
    1111             :                                 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
    1112             :                                 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1113             :                                 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
    1114             :                                 uint16_t apptag_mask, uint16_t apptag)
    1115             : {
    1116           0 :         struct nvme_request *req;
    1117           0 :         struct nvme_payload payload;
    1118           0 :         int rc = 0;
    1119             : 
    1120           0 :         if (!_is_io_flags_valid(io_flags)) {
    1121           0 :                 return -EINVAL;
    1122             :         }
    1123             : 
    1124           0 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
    1125           0 :                 return -EINVAL;
    1126             :         }
    1127             : 
    1128           0 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
    1129             : 
    1130           0 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1131           0 :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    1132           0 :         if (req != NULL) {
    1133           0 :                 return nvme_qpair_submit_request(qpair, req);
    1134             :         } else {
    1135           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1136           0 :                                               ns->sectors_per_max_io,
    1137           0 :                                               ns->sectors_per_stripe,
    1138           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1139           0 :                                               rc);
    1140             :         }
    1141           0 : }
    1142             : 
    1143             : int
    1144           4 : spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
    1145             :                             uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1146             :                             spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1147             :                             spdk_nvme_req_next_sge_cb next_sge_fn,
    1148             :                             struct spdk_nvme_ns_cmd_ext_io_opts *opts)
    1149             : {
    1150           8 :         return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
    1151           4 :                                    opts, SPDK_NVME_OPC_WRITE);
    1152             : }
    1153             : 
    1154             : int
    1155           1 : spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1156             :                               uint64_t lba, uint32_t lba_count,
    1157             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1158             :                               uint32_t io_flags)
    1159             : {
    1160           1 :         struct nvme_request     *req;
    1161           1 :         struct spdk_nvme_cmd    *cmd;
    1162           1 :         uint64_t                *tmp_lba;
    1163             : 
    1164           1 :         if (!_is_io_flags_valid(io_flags)) {
    1165           0 :                 return -EINVAL;
    1166             :         }
    1167             : 
    1168           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1169           0 :                 return -EINVAL;
    1170             :         }
    1171             : 
    1172           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1173           1 :         if (req == NULL) {
    1174           0 :                 return -ENOMEM;
    1175             :         }
    1176             : 
    1177           1 :         cmd = &req->cmd;
    1178           1 :         cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
    1179           1 :         cmd->nsid = ns->id;
    1180             : 
    1181           1 :         tmp_lba = (uint64_t *)&cmd->cdw10;
    1182           1 :         *tmp_lba = lba;
    1183           1 :         cmd->cdw12 = lba_count - 1;
    1184           1 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    1185           1 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
    1186             : 
    1187           1 :         return nvme_qpair_submit_request(qpair, req);
    1188           1 : }
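
/*
 * Usage note: lba_count is 1-based at this API and encoded 0-based in cdw12,
 * so the full 65536-block maximum is expressible. A hypothetical call zeroing
 * LBAs 2048-4095 would be:
 *
 *	rc = spdk_nvme_ns_cmd_write_zeroes(ns, qpair, 2048, 2048, io_done,
 *					   NULL, 0);
 */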
    1189             : 
    1190             : int
    1191           1 : spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1192             :                         uint64_t lba, uint32_t lba_count,
    1193             :                         spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1194             :                         uint32_t io_flags)
    1195             : {
    1196           1 :         struct nvme_request     *req;
    1197           1 :         struct spdk_nvme_cmd    *cmd;
    1198             : 
    1199           1 :         if (!_is_io_flags_valid(io_flags)) {
    1200           0 :                 return -EINVAL;
    1201             :         }
    1202             : 
    1203           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1204           0 :                 return -EINVAL;
    1205             :         }
    1206             : 
    1207           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1208           1 :         if (req == NULL) {
    1209           0 :                 return -ENOMEM;
    1210             :         }
    1211             : 
    1212           1 :         cmd = &req->cmd;
    1213           1 :         cmd->opc = SPDK_NVME_OPC_VERIFY;
    1214           1 :         cmd->nsid = ns->id;
    1215             : 
    1216           1 :         *(uint64_t *)&cmd->cdw10 = lba;
    1217           1 :         cmd->cdw12 = lba_count - 1;
    1218           1 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    1219           1 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
    1220             : 
    1221           1 :         return nvme_qpair_submit_request(qpair, req);
    1222           1 : }
    1223             : 
    1224             : int
    1225           1 : spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1226             :                                      uint64_t lba, uint32_t lba_count,
    1227             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1228             : {
    1229           1 :         struct nvme_request     *req;
    1230           1 :         struct spdk_nvme_cmd    *cmd;
    1231           1 :         uint64_t                *tmp_lba;
    1232             : 
    1233           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1234           0 :                 return -EINVAL;
    1235             :         }
    1236             : 
    1237           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1238           1 :         if (req == NULL) {
    1239           0 :                 return -ENOMEM;
    1240             :         }
    1241             : 
    1242           1 :         cmd = &req->cmd;
    1243           1 :         cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
    1244           1 :         cmd->nsid = ns->id;
    1245             : 
    1246           1 :         tmp_lba = (uint64_t *)&cmd->cdw10;
    1247           1 :         *tmp_lba = lba;
    1248           1 :         cmd->cdw12 = lba_count - 1;
    1249             : 
    1250           1 :         return nvme_qpair_submit_request(qpair, req);
    1251           1 : }
    1252             : 
    1253             : int
    1254           3 : spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1255             :                                     uint32_t type,
    1256             :                                     const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
    1257             :                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1258             : {
    1259           3 :         struct nvme_request     *req;
    1260           3 :         struct spdk_nvme_cmd    *cmd;
    1261             : 
    1262           3 :         if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
    1263           1 :                 return -EINVAL;
    1264             :         }
    1265             : 
    1266           2 :         if (ranges == NULL) {
    1267           0 :                 return -EINVAL;
    1268             :         }
    1269             : 
    1270           4 :         req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
    1271           2 :                                               num_ranges * sizeof(struct spdk_nvme_dsm_range),
    1272           2 :                                               cb_fn, cb_arg, true);
    1273           2 :         if (req == NULL) {
    1274           0 :                 return -ENOMEM;
    1275             :         }
    1276             : 
    1277           2 :         cmd = &req->cmd;
    1278           2 :         cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    1279           2 :         cmd->nsid = ns->id;
    1280             : 
    1281           2 :         cmd->cdw10_bits.dsm.nr = num_ranges - 1;
    1282           2 :         cmd->cdw11 = type;
    1283             : 
    1284           2 :         return nvme_qpair_submit_request(qpair, req);
    1285           3 : }
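
/*
 * Caller-side sketch (hypothetical extents): a deallocate (TRIM) of two
 * ranges. 'length' is in logical blocks, and since
 * nvme_allocate_request_user_copy() copies the buffer, the range array may
 * live on the caller's stack.
 */
static int
example_trim(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_dsm_range ranges[2] = {
		{ .starting_lba = 0,	.length = 256 },
		{ .starting_lba = 1024,	.length = 256 },
	};

	return spdk_nvme_ns_cmd_dataset_management(ns, qpair,
						   SPDK_NVME_DSM_ATTR_DEALLOCATE,
						   ranges, 2, cb_fn, cb_arg);
}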
    1286             : 
    1287             : int
    1288           3 : spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1289             :                       const struct spdk_nvme_scc_source_range *ranges,
    1290             :                       uint16_t num_ranges, uint64_t dest_lba,
    1291             :                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1292             : {
    1293           3 :         struct nvme_request     *req;
    1294           3 :         struct spdk_nvme_cmd    *cmd;
    1295             : 
    1296           3 :         if (num_ranges == 0) {
    1297           1 :                 return -EINVAL;
    1298             :         }
    1299             : 
    1300           2 :         if (ranges == NULL) {
    1301           0 :                 return -EINVAL;
    1302             :         }
    1303             : 
    1304           4 :         req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
    1305           2 :                                               num_ranges * sizeof(struct spdk_nvme_scc_source_range),
    1306           2 :                                               cb_fn, cb_arg, true);
    1307           2 :         if (req == NULL) {
    1308           0 :                 return -ENOMEM;
    1309             :         }
    1310             : 
    1311           2 :         cmd = &req->cmd;
    1312           2 :         cmd->opc = SPDK_NVME_OPC_COPY;
    1313           2 :         cmd->nsid = ns->id;
    1314             : 
    1315           2 :         *(uint64_t *)&cmd->cdw10 = dest_lba;
    1316           2 :         cmd->cdw12 = num_ranges - 1;
    1317             : 
    1318           2 :         return nvme_qpair_submit_request(qpair, req);
    1319           3 : }
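
/*
 * Caller-side sketch (hypothetical LBAs): a Simple Copy of 16 blocks from
 * LBA 0 to LBA 1024. Per the NVMe spec, nlb in each source range is 0-based,
 * while dest_lba is an absolute LBA.
 */
static int
example_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_scc_source_range range = { .slba = 0, .nlb = 15 };

	return spdk_nvme_ns_cmd_copy(ns, qpair, &range, 1, 1024, cb_fn, cb_arg);
}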
    1320             : 
    1321             : int
    1322           1 : spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1323             :                        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1324             : {
    1325           1 :         struct nvme_request     *req;
    1326           1 :         struct spdk_nvme_cmd    *cmd;
    1327             : 
    1328           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1329           1 :         if (req == NULL) {
    1330           0 :                 return -ENOMEM;
    1331             :         }
    1332             : 
    1333           1 :         cmd = &req->cmd;
    1334           1 :         cmd->opc = SPDK_NVME_OPC_FLUSH;
    1335           1 :         cmd->nsid = ns->id;
    1336             : 
    1337           1 :         return nvme_qpair_submit_request(qpair, req);
    1338           1 : }
    1339             : 
    1340             : int
    1341           1 : spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
    1342             :                                       struct spdk_nvme_qpair *qpair,
    1343             :                                       struct spdk_nvme_reservation_register_data *payload,
    1344             :                                       bool ignore_key,
    1345             :                                       enum spdk_nvme_reservation_register_action action,
    1346             :                                       enum spdk_nvme_reservation_register_cptpl cptpl,
    1347             :                                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1348             : {
    1349           1 :         struct nvme_request     *req;
    1350           1 :         struct spdk_nvme_cmd    *cmd;
    1351             : 
    1352           2 :         req = nvme_allocate_request_user_copy(qpair,
    1353           1 :                                               payload, sizeof(struct spdk_nvme_reservation_register_data),
    1354           1 :                                               cb_fn, cb_arg, true);
    1355           1 :         if (req == NULL) {
    1356           0 :                 return -ENOMEM;
    1357             :         }
    1358             : 
    1359           1 :         cmd = &req->cmd;
    1360           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
    1361           1 :         cmd->nsid = ns->id;
    1362             : 
    1363           1 :         cmd->cdw10_bits.resv_register.rrega = action;
    1364           1 :         cmd->cdw10_bits.resv_register.iekey = ignore_key;
    1365           1 :         cmd->cdw10_bits.resv_register.cptpl = cptpl;
    1366             : 
    1367           1 :         return nvme_qpair_submit_request(qpair, req);
    1368           1 : }
    1369             : 
    1370             : int
    1371           1 : spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
    1372             :                                      struct spdk_nvme_qpair *qpair,
    1373             :                                      struct spdk_nvme_reservation_key_data *payload,
    1374             :                                      bool ignore_key,
    1375             :                                      enum spdk_nvme_reservation_release_action action,
    1376             :                                      enum spdk_nvme_reservation_type type,
    1377             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1378             : {
    1379           1 :         struct nvme_request     *req;
    1380           1 :         struct spdk_nvme_cmd    *cmd;
    1381             : 
    1382           2 :         req = nvme_allocate_request_user_copy(qpair,
    1383           1 :                                               payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
    1384           1 :                                               cb_arg, true);
    1385           1 :         if (req == NULL) {
    1386           0 :                 return -ENOMEM;
    1387             :         }
    1388             : 
    1389           1 :         cmd = &req->cmd;
    1390           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
    1391           1 :         cmd->nsid = ns->id;
    1392             : 
    1393           1 :         cmd->cdw10_bits.resv_release.rrela = action;
    1394           1 :         cmd->cdw10_bits.resv_release.iekey = ignore_key;
    1395           1 :         cmd->cdw10_bits.resv_release.rtype = type;
    1396             : 
    1397           1 :         return nvme_qpair_submit_request(qpair, req);
    1398           1 : }
    1399             : 
    1400             : int
    1401           1 : spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
    1402             :                                      struct spdk_nvme_qpair *qpair,
    1403             :                                      struct spdk_nvme_reservation_acquire_data *payload,
    1404             :                                      bool ignore_key,
    1405             :                                      enum spdk_nvme_reservation_acquire_action action,
    1406             :                                      enum spdk_nvme_reservation_type type,
    1407             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1408             : {
    1409           1 :         struct nvme_request     *req;
    1410           1 :         struct spdk_nvme_cmd    *cmd;
    1411             : 
    1412           2 :         req = nvme_allocate_request_user_copy(qpair,
    1413           1 :                                               payload, sizeof(struct spdk_nvme_reservation_acquire_data),
    1414           1 :                                               cb_fn, cb_arg, true);
    1415           1 :         if (req == NULL) {
    1416           0 :                 return -ENOMEM;
    1417             :         }
    1418             : 
    1419           1 :         cmd = &req->cmd;
    1420           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
    1421           1 :         cmd->nsid = ns->id;
    1422             : 
    1423           1 :         cmd->cdw10_bits.resv_acquire.racqa = action;
    1424           1 :         cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
    1425           1 :         cmd->cdw10_bits.resv_acquire.rtype = type;
    1426             : 
    1427           1 :         return nvme_qpair_submit_request(qpair, req);
    1428           1 : }
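
/*
 * Caller-side sketch (hypothetical key values): register a key, then use it
 * to take a write-exclusive reservation. Both payloads are copied by
 * nvme_allocate_request_user_copy(), so stack storage is fine.
 */
static int
example_reserve(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_reservation_register_data reg = { .crkey = 0, .nrkey = 0xA1 };
	struct spdk_nvme_reservation_acquire_data acq = { .crkey = 0xA1, .prkey = 0 };
	int rc;

	rc = spdk_nvme_ns_cmd_reservation_register(ns, qpair, &reg, false,
						   SPDK_NVME_RESERVE_REGISTER_KEY,
						   SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
						   cb_fn, cb_arg);
	if (rc) {
		return rc;
	}
	/* A real caller would wait for the register completion first. */
	return spdk_nvme_ns_cmd_reservation_acquire(ns, qpair, &acq, false,
						    SPDK_NVME_RESERVE_ACQUIRE,
						    SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
						    cb_fn, cb_arg);
}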
    1429             : 
    1430             : int
    1431           1 : spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
    1432             :                                     struct spdk_nvme_qpair *qpair,
    1433             :                                     void *payload, uint32_t len,
    1434             :                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1435             : {
    1436           1 :         uint32_t                num_dwords;
    1437           1 :         struct nvme_request     *req;
    1438           1 :         struct spdk_nvme_cmd    *cmd;
    1439             : 
    1440           1 :         if (len & 0x3) {
    1441           0 :                 return -EINVAL;
    1442             :         }
    1443             : 
    1444           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1445           1 :         if (req == NULL) {
    1446           0 :                 return -ENOMEM;
    1447             :         }
    1448             : 
    1449           1 :         cmd = &req->cmd;
    1450           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
    1451           1 :         cmd->nsid = ns->id;
    1452             : 
    1453           1 :         num_dwords = (len >> 2);
    1454           1 :         cmd->cdw10 = num_dwords - 1; /* 0-based */
    1455             : 
    1456           1 :         return nvme_qpair_submit_request(qpair, req);
    1457           1 : }
    1458             : 
    1459             : int
    1460           2 : spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1461             :                               void *payload, uint32_t len, uint8_t mo, uint16_t mos,
    1462             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1463             : {
    1464           2 :         uint32_t                num_dwords;
    1465           2 :         struct nvme_request     *req;
    1466           2 :         struct spdk_nvme_cmd    *cmd;
    1467             : 
    1468           2 :         if (len & 0x3) {
    1469           1 :                 return -EINVAL;
    1470             :         }
    1471             : 
    1472           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1473           1 :         if (req == NULL) {
    1474           0 :                 return -ENOMEM;
    1475             :         }
    1476             : 
    1477           1 :         cmd = &req->cmd;
    1478           1 :         cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
    1479           1 :         cmd->nsid = ns->id;
    1480             : 
    1481           1 :         cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    1482           1 :         cmd->cdw10_bits.mgmt_send_recv.mos = mos;
    1483             : 
    1484           1 :         num_dwords = (len >> 2);
    1485           1 :         cmd->cdw11 = num_dwords - 1; /* 0-based */
    1486             : 
    1487           1 :         return nvme_qpair_submit_request(qpair, req);
    1488           2 : }
    1489             : 
    1490             : int
    1491           1 : spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1492             :                               void *payload, uint32_t len, uint8_t mo, uint16_t mos,
    1493             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1494             : {
    1495           1 :         struct nvme_request     *req;
    1496           1 :         struct spdk_nvme_cmd    *cmd;
    1497             : 
    1498           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1499           1 :         if (req == NULL) {
    1500           0 :                 return -ENOMEM;
    1501             :         }
    1502             : 
    1503           1 :         cmd = &req->cmd;
    1504           1 :         cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
    1505           1 :         cmd->nsid = ns->id;
    1506             : 
    1507           1 :         cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    1508           1 :         cmd->cdw10_bits.mgmt_send_recv.mos = mos;
    1509             : 
    1510           1 :         return nvme_qpair_submit_request(qpair, req);
    1511           1 : }
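
/*
 * Caller-side sketch (FDP; hypothetical buffer size, assumes the
 * SPDK_NVME_FDP_IO_MGMT_RECV_RUHS management operation from spdk/nvme_spec.h):
 * fetch the reclaim unit handle status list. As checked above, len must be a
 * multiple of 4, and because the request path copies the buffer, plain heap
 * memory is fine; the caller frees it after the completion callback runs.
 */
static int
example_fdp_ruhs(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t len = 4096;
	void *buf = calloc(1, len);

	if (buf == NULL) {
		return -ENOMEM;
	}
	return spdk_nvme_ns_cmd_io_mgmt_recv(ns, qpair, buf, len,
					     SPDK_NVME_FDP_IO_MGMT_RECV_RUHS, 0,
					     cb_fn, cb_arg);
}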

Generated by: LCOV version 1.15