LCOV - code coverage report
Current view: top level - lib/nvme - nvme_ctrlr.c (source / functions)
Test: ut_cov_unit.info          Lines:     1560 / 2612  (59.7 %)
Date: 2024-07-12 09:43:53       Functions: 137 / 201    (68.2 %)

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
       3             :  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
       4             :  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       5             :  */
       6             : 
       7             : #include "spdk/stdinc.h"
       8             : 
       9             : #include "nvme_internal.h"
      10             : #include "nvme_io_msg.h"
      11             : 
      12             : #include "spdk/env.h"
      13             : #include "spdk/string.h"
      14             : #include "spdk/endian.h"
      15             : 
      16             : struct nvme_active_ns_ctx;
      17             : 
      18             : static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
      19             :                 struct nvme_async_event_request *aer);
      20             : static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
      21             : static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
      22             : static int nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns);
      23             : static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
      24             : static void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr);
      25             : static void nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
      26             :                                  uint64_t timeout_in_ms);
      27             : 
      28             : static int
      29      477891 : nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
      30             : {
      31      477891 :         if (ns1->id < ns2->id) {
      32      164867 :                 return -1;
      33      313024 :         } else if (ns1->id > ns2->id) {
      34      276062 :                 return 1;
      35             :         } else {
      36       36962 :                 return 0;
      37             :         }
      38             : }
      39             : 
      40      603289 : RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
      41             : 
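/*
 * Editor's note -- a minimal sketch of how the comparator and the tree
 * generated above are used for namespace lookup. It assumes the
 * controller embeds an RB_HEAD(nvme_ns_tree, spdk_nvme_ns) named "ns",
 * which RB_GENERATE_STATIC implies but this excerpt does not show;
 * treat the field name as illustrative.
 */
static inline struct spdk_nvme_ns *
example_lookup_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
        struct spdk_nvme_ns key;

        key.id = nsid; /* only the key field matters for the comparison */
        return RB_FIND(nvme_ns_tree, &ctrlr->ns, &key);
}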
      42             : #define CTRLR_STRING(ctrlr) \
      43             :         ((ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP || ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_RDMA) ? \
      44             :         ctrlr->trid.subnqn : ctrlr->trid.traddr)
      45             : 
      46             : #define NVME_CTRLR_ERRLOG(ctrlr, format, ...) \
      47             :         SPDK_ERRLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      48             : 
      49             : #define NVME_CTRLR_WARNLOG(ctrlr, format, ...) \
      50             :         SPDK_WARNLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      51             : 
      52             : #define NVME_CTRLR_NOTICELOG(ctrlr, format, ...) \
      53             :         SPDK_NOTICELOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      54             : 
      55             : #define NVME_CTRLR_INFOLOG(ctrlr, format, ...) \
      56             :         SPDK_INFOLOG(nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      57             : 
      58             : #ifdef DEBUG
      59             : #define NVME_CTRLR_DEBUGLOG(ctrlr, format, ...) \
      60             :         SPDK_DEBUGLOG(nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      61             : #else
      62             : #define NVME_CTRLR_DEBUGLOG(ctrlr, ...) do { } while (0)
      63             : #endif
      64             : 
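/*
 * Editor's note: CTRLR_STRING tags every log line with the subsystem
 * NQN for the fabrics transports (TCP/RDMA) and with the transport
 * address otherwise. A typical call site such as
 *
 *     NVME_CTRLR_ERRLOG(ctrlr, "qid %u connect failed, rc %d\n", qid, rc);
 *
 * therefore expands to
 *
 *     SPDK_ERRLOG("[%s] qid %u connect failed, rc %d\n",
 *                 CTRLR_STRING(ctrlr), qid, rc);
 */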
      65             : #define nvme_ctrlr_get_reg_async(ctrlr, reg, sz, cb_fn, cb_arg) \
      66             :         nvme_transport_ctrlr_get_reg_ ## sz ## _async(ctrlr, \
      67             :                 offsetof(struct spdk_nvme_registers, reg), cb_fn, cb_arg)
      68             : 
      69             : #define nvme_ctrlr_set_reg_async(ctrlr, reg, sz, val, cb_fn, cb_arg) \
      70             :         nvme_transport_ctrlr_set_reg_ ## sz ## _async(ctrlr, \
      71             :                 offsetof(struct spdk_nvme_registers, reg), val, cb_fn, cb_arg)
      72             : 
      73             : #define nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) \
      74             :         nvme_ctrlr_get_reg_async(ctrlr, cc, 4, cb_fn, cb_arg)
      75             : 
      76             : #define nvme_ctrlr_get_csts_async(ctrlr, cb_fn, cb_arg) \
      77             :         nvme_ctrlr_get_reg_async(ctrlr, csts, 4, cb_fn, cb_arg)
      78             : 
      79             : #define nvme_ctrlr_get_cap_async(ctrlr, cb_fn, cb_arg) \
      80             :         nvme_ctrlr_get_reg_async(ctrlr, cap, 8, cb_fn, cb_arg)
      81             : 
      82             : #define nvme_ctrlr_get_vs_async(ctrlr, cb_fn, cb_arg) \
      83             :         nvme_ctrlr_get_reg_async(ctrlr, vs, 4, cb_fn, cb_arg)
      84             : 
      85             : #define nvme_ctrlr_set_cc_async(ctrlr, value, cb_fn, cb_arg) \
      86             :         nvme_ctrlr_set_reg_async(ctrlr, cc, 4, value, cb_fn, cb_arg)
      87             : 
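/*
 * Editor's note: the helpers above paste the register width into the
 * transport call via token concatenation. For example,
 *
 *     nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg);
 *
 * expands to
 *
 *     nvme_transport_ctrlr_get_reg_4_async(ctrlr,
 *             offsetof(struct spdk_nvme_registers, cc), cb_fn, cb_arg);
 */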
      88             : static int
      89           0 : nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
      90             : {
      91           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
      92             :                                               &cc->raw);
      93             : }
      94             : 
      95             : static int
      96           0 : nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
      97             : {
      98           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
      99             :                                               &csts->raw);
     100             : }
     101             : 
     102             : int
     103           0 : nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
     104             : {
     105           0 :         return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
     106             :                                               &cap->raw);
     107             : }
     108             : 
     109             : int
     110           1 : nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
     111             : {
     112           1 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
     113             :                                               &vs->raw);
     114             : }
     115             : 
     116             : int
     117           0 : nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
     118             : {
     119           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
     120             :                                               &cmbsz->raw);
     121             : }
     122             : 
     123             : int
     124           0 : nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap)
     125             : {
     126           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
     127             :                                               &pmrcap->raw);
     128             : }
     129             : 
     130             : int
     131           0 : nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo)
     132             : {
     133           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bpinfo.raw),
     134             :                                               &bpinfo->raw);
     135             : }
     136             : 
     137             : int
     138           0 : nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel)
     139             : {
     140           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bprsel.raw),
     141             :                                               bprsel->raw);
     142             : }
     143             : 
     144             : int
     145           0 : nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value)
     146             : {
     147           0 :         return nvme_transport_ctrlr_set_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, bpmbl),
     148             :                                               bpmbl_value);
     149             : }
     150             : 
     151             : static int
     152           0 : nvme_ctrlr_set_nssr(struct spdk_nvme_ctrlr *ctrlr, uint32_t nssr_value)
     153             : {
     154           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, nssr),
     155             :                                               nssr_value);
     156             : }
     157             : 
     158             : bool
     159          33 : nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr)
     160             : {
     161          35 :         return ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS &&
     162           2 :                ctrlr->opts.command_set == SPDK_NVME_CC_CSS_IOCS;
     163             : }
     164             : 
      165             : /* When fields are added to spdk_nvme_ctrlr_opts and this function is updated, please
      166             :  * also update the nvme_ctrl_opts_init function in nvme_ctrlr.c
     167             :  */
     168             : void
     169           2 : spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
     170             : {
     171           2 :         char host_id_str[SPDK_UUID_STRING_LEN];
     172             : 
     173           2 :         assert(opts);
     174             : 
     175           2 :         opts->opts_size = opts_size;
     176             : 
     177             : #define FIELD_OK(field) \
     178             :         offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
     179             : 
     180             : #define SET_FIELD(field, value) \
     181             :         if (offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size) { \
     182             :                 opts->field = value; \
     183             :         } \
     184             : 
     185           2 :         SET_FIELD(num_io_queues, DEFAULT_MAX_IO_QUEUES);
     186           2 :         SET_FIELD(use_cmb_sqs, false);
     187           2 :         SET_FIELD(no_shn_notification, false);
     188           2 :         SET_FIELD(arb_mechanism, SPDK_NVME_CC_AMS_RR);
     189           2 :         SET_FIELD(arbitration_burst, 0);
     190           2 :         SET_FIELD(low_priority_weight, 0);
     191           2 :         SET_FIELD(medium_priority_weight, 0);
     192           2 :         SET_FIELD(high_priority_weight, 0);
     193           2 :         SET_FIELD(keep_alive_timeout_ms, MIN_KEEP_ALIVE_TIMEOUT_IN_MS);
     194           2 :         SET_FIELD(transport_retry_count, SPDK_NVME_DEFAULT_RETRY_COUNT);
     195           2 :         SET_FIELD(io_queue_size, DEFAULT_IO_QUEUE_SIZE);
     196             : 
     197           2 :         if (nvme_driver_init() == 0) {
     198           2 :                 if (FIELD_OK(hostnqn)) {
     199           1 :                         spdk_uuid_fmt_lower(host_id_str, sizeof(host_id_str),
     200           1 :                                             &g_spdk_nvme_driver->default_extended_host_id);
     201           1 :                         snprintf(opts->hostnqn, sizeof(opts->hostnqn),
     202             :                                  "nqn.2014-08.org.nvmexpress:uuid:%s", host_id_str);
     203             :                 }
     204             : 
     205           2 :                 if (FIELD_OK(extended_host_id)) {
     206           1 :                         memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
     207             :                                sizeof(opts->extended_host_id));
     208             :                 }
     209             : 
     210             :         }
     211             : 
     212           2 :         SET_FIELD(io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
     213             : 
     214           2 :         if (FIELD_OK(src_addr)) {
     215           1 :                 memset(opts->src_addr, 0, sizeof(opts->src_addr));
     216             :         }
     217             : 
     218           2 :         if (FIELD_OK(src_svcid)) {
     219           1 :                 memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
     220             :         }
     221             : 
     222           2 :         if (FIELD_OK(host_id)) {
     223           1 :                 memset(opts->host_id, 0, sizeof(opts->host_id));
     224             :         }
     225             : 
     226           2 :         SET_FIELD(command_set, CHAR_BIT);
     227           2 :         SET_FIELD(admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
     228           2 :         SET_FIELD(header_digest, false);
     229           2 :         SET_FIELD(data_digest, false);
     230           2 :         SET_FIELD(disable_error_logging, false);
     231           2 :         SET_FIELD(transport_ack_timeout, SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT);
     232           2 :         SET_FIELD(admin_queue_size, DEFAULT_ADMIN_QUEUE_SIZE);
     233           2 :         SET_FIELD(fabrics_connect_timeout_us, NVME_FABRIC_CONNECT_COMMAND_TIMEOUT);
     234           2 :         SET_FIELD(disable_read_ana_log_page, false);
     235           2 :         SET_FIELD(disable_read_changed_ns_list_log_page, false);
     236           2 :         SET_FIELD(tls_psk, NULL);
     237           2 :         SET_FIELD(dhchap_key, NULL);
     238           2 :         SET_FIELD(dhchap_ctrlr_key, NULL);
     239           2 :         SET_FIELD(dhchap_digests,
     240             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) |
     241             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) |
     242             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512));
     243           2 :         SET_FIELD(dhchap_dhgroups,
     244             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) |
     245             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) |
     246             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) |
     247             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) |
     248             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) |
     249             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192));
     250             : 
     251           2 :         if (FIELD_OK(psk)) {
     252           1 :                 memset(opts->psk, 0, sizeof(opts->psk));
     253             :         }
     254             : 
     255             : #undef FIELD_OK
     256             : #undef SET_FIELD
     257           2 : }
     258             : 
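/*
 * Editor's sketch of the intended calling pattern: a consumer fills the
 * struct with defaults sized to its own compiled-in view of the options,
 * then overrides individual fields. Passing sizeof(opts) is what lets the
 * FIELD_OK/SET_FIELD guards above skip fields that an older caller's
 * struct does not contain, preserving ABI compatibility.
 */
static void
example_init_opts(struct spdk_nvme_ctrlr_opts *opts)
{
        spdk_nvme_ctrlr_get_default_ctrlr_opts(opts, sizeof(*opts));
        opts->keep_alive_timeout_ms = 30 * 1000; /* override one default */
        /* opts and sizeof(*opts) are then handed to e.g. spdk_nvme_connect(). */
}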
     259             : const struct spdk_nvme_ctrlr_opts *
     260           0 : spdk_nvme_ctrlr_get_opts(struct spdk_nvme_ctrlr *ctrlr)
     261             : {
     262           0 :         return &ctrlr->opts;
     263             : }
     264             : 
     265             : /**
     266             :  * This function will be called when the process allocates the IO qpair.
     267             :  * Note: the ctrlr_lock must be held when calling this function.
     268             :  */
     269             : static void
     270          15 : nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
     271             : {
     272             :         struct spdk_nvme_ctrlr_process  *active_proc;
     273          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     274             : 
     275          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     276          15 :         if (active_proc) {
     277           0 :                 TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
     278           0 :                 qpair->active_proc = active_proc;
     279             :         }
     280          15 : }
     281             : 
     282             : /**
     283             :  * This function will be called when the process frees the IO qpair.
     284             :  * Note: the ctrlr_lock must be held when calling this function.
     285             :  */
     286             : static void
     287          15 : nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
     288             : {
     289             :         struct spdk_nvme_ctrlr_process  *active_proc;
     290          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     291             :         struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
     292             : 
     293          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     294          15 :         if (!active_proc) {
     295          15 :                 return;
     296             :         }
     297             : 
     298           0 :         TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
     299             :                            per_process_tailq, tmp_qpair) {
     300           0 :                 if (active_qpair == qpair) {
     301           0 :                         TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
     302             :                                      active_qpair, per_process_tailq);
     303             : 
     304           0 :                         break;
     305             :                 }
     306             :         }
     307             : }
     308             : 
     309             : void
     310          27 : spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
     311             :                 struct spdk_nvme_io_qpair_opts *opts,
     312             :                 size_t opts_size)
     313             : {
     314          27 :         assert(ctrlr);
     315             : 
     316          27 :         assert(opts);
     317             : 
     318          27 :         memset(opts, 0, opts_size);
     319             : 
     320             : #define FIELD_OK(field) \
     321             :         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
     322             : 
     323          27 :         if (FIELD_OK(qprio)) {
     324          27 :                 opts->qprio = SPDK_NVME_QPRIO_URGENT;
     325             :         }
     326             : 
     327          27 :         if (FIELD_OK(io_queue_size)) {
     328          27 :                 opts->io_queue_size = ctrlr->opts.io_queue_size;
     329             :         }
     330             : 
     331          27 :         if (FIELD_OK(io_queue_requests)) {
     332          26 :                 opts->io_queue_requests = ctrlr->opts.io_queue_requests;
     333             :         }
     334             : 
     335          27 :         if (FIELD_OK(delay_cmd_submit)) {
     336          26 :                 opts->delay_cmd_submit = false;
     337             :         }
     338             : 
     339          27 :         if (FIELD_OK(sq.vaddr)) {
     340          26 :                 opts->sq.vaddr = NULL;
     341             :         }
     342             : 
     343          27 :         if (FIELD_OK(sq.paddr)) {
     344          26 :                 opts->sq.paddr = 0;
     345             :         }
     346             : 
     347          27 :         if (FIELD_OK(sq.buffer_size)) {
     348          26 :                 opts->sq.buffer_size = 0;
     349             :         }
     350             : 
     351          27 :         if (FIELD_OK(cq.vaddr)) {
     352          26 :                 opts->cq.vaddr = NULL;
     353             :         }
     354             : 
     355          27 :         if (FIELD_OK(cq.paddr)) {
     356          26 :                 opts->cq.paddr = 0;
     357             :         }
     358             : 
     359          27 :         if (FIELD_OK(cq.buffer_size)) {
     360          26 :                 opts->cq.buffer_size = 0;
     361             :         }
     362             : 
     363          27 :         if (FIELD_OK(create_only)) {
     364          26 :                 opts->create_only = false;
     365             :         }
     366             : 
     367          27 :         if (FIELD_OK(async_mode)) {
     368          26 :                 opts->async_mode = false;
     369             :         }
     370             : 
     371             : #undef FIELD_OK
     372          27 : }
     373             : 
     374             : static struct spdk_nvme_qpair *
     375          22 : nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     376             :                            const struct spdk_nvme_io_qpair_opts *opts)
     377             : {
     378             :         int32_t                                 qid;
     379             :         struct spdk_nvme_qpair                  *qpair;
     380             :         union spdk_nvme_cc_register             cc;
     381             : 
     382          22 :         if (!ctrlr) {
     383           0 :                 return NULL;
     384             :         }
     385             : 
     386          22 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     387          22 :         cc.raw = ctrlr->process_init_cc.raw;
     388             : 
     389          22 :         if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
     390           2 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     391           2 :                 return NULL;
     392             :         }
     393             : 
     394             :         /*
      395             :          * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
     396             :          * default round robin arbitration method.
     397             :          */
     398          20 :         if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
     399           3 :                 NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
     400           3 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     401           3 :                 return NULL;
     402             :         }
     403             : 
     404          17 :         qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
     405          17 :         if (qid < 0) {
     406           2 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     407           2 :                 return NULL;
     408             :         }
     409             : 
     410          15 :         qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
     411          15 :         if (qpair == NULL) {
     412           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
     413           0 :                 spdk_nvme_ctrlr_free_qid(ctrlr, qid);
     414           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     415           0 :                 return NULL;
     416             :         }
     417             : 
     418          15 :         TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
     419             : 
     420          15 :         nvme_ctrlr_proc_add_io_qpair(qpair);
     421             : 
     422          15 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     423             : 
     424          15 :         return qpair;
     425             : }
     426             : 
     427             : int
     428          15 : spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
     429             : {
     430             :         int rc;
     431             : 
     432          15 :         if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
     433           0 :                 return -EISCONN;
     434             :         }
     435             : 
     436          15 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     437          15 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     438          15 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     439             : 
     440          15 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
     441           0 :                 spdk_delay_us(100);
     442             :         }
     443             : 
     444          15 :         return rc;
     445             : }
     446             : 
     447             : void
     448           0 : spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     449             : {
     450           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     451             : 
     452           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     453           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     454           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     455           0 : }
     456             : 
     457             : struct spdk_nvme_qpair *
     458          23 : spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     459             :                                const struct spdk_nvme_io_qpair_opts *user_opts,
     460             :                                size_t opts_size)
     461             : {
     462             : 
     463          23 :         struct spdk_nvme_qpair          *qpair = NULL;
     464          23 :         struct spdk_nvme_io_qpair_opts  opts;
     465             :         int                             rc;
     466             : 
     467          23 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     468             : 
     469          23 :         if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
      470             :                 /* When the controller is resetting or initializing, free_io_qids has been deleted
      471             :                  * or not created yet. We can't create an IO qpair in that case. */
     472           1 :                 goto unlock;
     473             :         }
     474             : 
     475             :         /*
     476             :          * Get the default options, then overwrite them with the user-provided options
     477             :          * up to opts_size.
     478             :          *
     479             :          * This allows for extensions of the opts structure without breaking
     480             :          * ABI compatibility.
     481             :          */
     482          22 :         spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
     483          22 :         if (user_opts) {
     484          18 :                 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
     485             : 
     486             :                 /* If user passes buffers, make sure they're big enough for the requested queue size */
     487          18 :                 if (opts.sq.vaddr) {
     488           0 :                         if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
     489           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "sq buffer size %" PRIx64 " is too small for sq size %zx\n",
     490             :                                                   opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
     491           0 :                                 goto unlock;
     492             :                         }
     493             :                 }
     494          18 :                 if (opts.cq.vaddr) {
     495           0 :                         if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
     496           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "cq buffer size %" PRIx64 " is too small for cq size %zx\n",
     497             :                                                   opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
     498           0 :                                 goto unlock;
     499             :                         }
     500             :                 }
     501             :         }
     502             : 
     503          22 :         qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);
     504             : 
     505          22 :         if (qpair == NULL || opts.create_only == true) {
     506           7 :                 goto unlock;
     507             :         }
     508             : 
     509          15 :         rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
     510          15 :         if (rc != 0) {
     511           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_connect_io_qpair() failed\n");
     512           1 :                 nvme_ctrlr_proc_remove_io_qpair(qpair);
     513           1 :                 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     514           1 :                 spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
     515           1 :                 nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     516           1 :                 qpair = NULL;
     517           1 :                 goto unlock;
     518             :         }
     519             : 
     520          23 : unlock:
     521          23 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     522             : 
     523          23 :         return qpair;
     524             : }
     525             : 
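/*
 * Editor's sketch: with create_only = true the allocation above returns an
 * unconnected qpair, so the caller can, for example, add it to a poll group
 * before issuing the connect itself. A minimal flow under that assumption:
 */
static struct spdk_nvme_qpair *
example_alloc_and_connect(struct spdk_nvme_ctrlr *ctrlr)
{
        struct spdk_nvme_io_qpair_opts opts;
        struct spdk_nvme_qpair *qpair;

        spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
        opts.create_only = true;

        qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
        if (qpair == NULL) {
                return NULL;
        }

        /* ... e.g. spdk_nvme_poll_group_add(group, qpair) could go here ... */

        if (spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
                spdk_nvme_ctrlr_free_io_qpair(qpair);
                return NULL;
        }

        return qpair;
}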
     526             : int
     527           8 : spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     528             : {
     529             :         struct spdk_nvme_ctrlr *ctrlr;
     530             :         enum nvme_qpair_state qpair_state;
     531             :         int rc;
     532             : 
     533           8 :         assert(qpair != NULL);
     534           8 :         assert(nvme_qpair_is_admin_queue(qpair) == false);
     535           8 :         assert(qpair->ctrlr != NULL);
     536             : 
     537           8 :         ctrlr = qpair->ctrlr;
     538           8 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     539           8 :         qpair_state = nvme_qpair_get_state(qpair);
     540             : 
     541           8 :         if (ctrlr->is_removed) {
     542           2 :                 rc = -ENODEV;
     543           2 :                 goto out;
     544             :         }
     545             : 
     546           6 :         if (ctrlr->is_resetting || qpair_state == NVME_QPAIR_DISCONNECTING) {
     547           2 :                 rc = -EAGAIN;
     548           2 :                 goto out;
     549             :         }
     550             : 
     551           4 :         if (ctrlr->is_failed || qpair_state == NVME_QPAIR_DESTROYING) {
     552           2 :                 rc = -ENXIO;
     553           2 :                 goto out;
     554             :         }
     555             : 
     556           2 :         if (qpair_state != NVME_QPAIR_DISCONNECTED) {
     557           1 :                 rc = 0;
     558           1 :                 goto out;
     559             :         }
     560             : 
     561           1 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     562           1 :         if (rc) {
     563           0 :                 rc = -EAGAIN;
     564           0 :                 goto out;
     565             :         }
     566             : 
     567           1 : out:
     568           8 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     569           8 :         return rc;
     570             : }
     571             : 
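/*
 * Editor's sketch of how a caller might interpret the reconnect return
 * codes above: -EAGAIN is transient (controller resetting or qpair still
 * disconnecting), while -ENODEV and -ENXIO mean the qpair should be
 * given up on.
 */
static bool
example_reconnect_retryable(struct spdk_nvme_qpair *qpair)
{
        int rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);

        return rc == 0 || rc == -EAGAIN;
}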
     572             : spdk_nvme_qp_failure_reason
     573           0 : spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
     574             : {
     575           0 :         return ctrlr->adminq->transport_failure_reason;
     576             : }
     577             : 
     578             : /*
     579             :  * This internal function will attempt to take the controller
     580             :  * lock before calling disconnect on a controller qpair.
     581             :  * Functions already holding the controller lock should
     582             :  * call nvme_transport_ctrlr_disconnect_qpair directly.
     583             :  */
     584             : void
     585           0 : nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
     586             : {
     587           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     588             : 
     589           0 :         assert(ctrlr != NULL);
     590           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     591           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     592           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     593           0 : }
     594             : 
     595             : int
     596          14 : spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
     597             : {
     598             :         struct spdk_nvme_ctrlr *ctrlr;
     599             : 
     600          14 :         if (qpair == NULL) {
     601           0 :                 return 0;
     602             :         }
     603             : 
     604          14 :         ctrlr = qpair->ctrlr;
     605             : 
     606          14 :         if (qpair->in_completion_context) {
     607             :                 /*
     608             :                  * There are many cases where it is convenient to delete an io qpair in the context
     609             :                  *  of that qpair's completion routine.  To handle this properly, set a flag here
     610             :                  *  so that the completion routine will perform an actual delete after the context
     611             :                  *  unwinds.
     612             :                  */
     613           0 :                 qpair->delete_after_completion_context = 1;
     614           0 :                 return 0;
     615             :         }
     616             : 
     617          14 :         qpair->destroy_in_progress = 1;
     618             : 
     619          14 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     620             : 
     621          14 :         if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
     622           0 :                 spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
     623             :         }
     624             : 
     625             :         /* Do not retry. */
     626          14 :         nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
     627             : 
     628             :         /* In the multi-process case, a process may call this function on a foreign
      629             :          * I/O qpair (i.e. one that this process did not create) when that qpair's process
     630             :          * exits unexpectedly.  In that case, we must not try to abort any reqs associated
     631             :          * with that qpair, since the callbacks will also be foreign to this process.
     632             :          */
     633          14 :         if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
     634          14 :                 nvme_qpair_abort_all_queued_reqs(qpair);
     635             :         }
     636             : 
     637          14 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
     638             : 
     639          14 :         nvme_ctrlr_proc_remove_io_qpair(qpair);
     640             : 
     641          14 :         TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     642          14 :         spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
     643             : 
     644          14 :         nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     645          14 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
     646          14 :         return 0;
     647             : }
     648             : 
     649             : static void
     650           3 : nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
     651             :                 struct spdk_nvme_intel_log_page_directory *log_page_directory)
     652             : {
     653           3 :         if (log_page_directory == NULL) {
     654           0 :                 return;
     655             :         }
     656             : 
     657           3 :         assert(ctrlr->cdata.vid == SPDK_PCI_VID_INTEL);
     658             : 
     659           3 :         ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
     660             : 
     661           3 :         if (log_page_directory->read_latency_log_len ||
     662           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
     663           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
     664             :         }
     665           3 :         if (log_page_directory->write_latency_log_len ||
     666           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
     667           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
     668             :         }
     669           3 :         if (log_page_directory->temperature_statistics_log_len) {
     670           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
     671             :         }
     672           3 :         if (log_page_directory->smart_log_len) {
     673           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
     674             :         }
     675           3 :         if (log_page_directory->marketing_description_log_len) {
     676           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
     677             :         }
     678             : }
     679             : 
     680             : struct intel_log_pages_ctx {
     681             :         struct spdk_nvme_intel_log_page_directory log_page_directory;
     682             :         struct spdk_nvme_ctrlr *ctrlr;
     683             : };
     684             : 
     685             : static void
     686           1 : nvme_ctrlr_set_intel_support_log_pages_done(void *arg, const struct spdk_nvme_cpl *cpl)
     687             : {
     688           1 :         struct intel_log_pages_ctx *ctx = arg;
     689           1 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
     690             : 
     691           1 :         if (!spdk_nvme_cpl_is_error(cpl)) {
     692           1 :                 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, &ctx->log_page_directory);
     693             :         }
     694             : 
     695           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     696           1 :                              ctrlr->opts.admin_timeout_ms);
     697           1 :         free(ctx);
     698           1 : }
     699             : 
     700             : static int
     701           1 : nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     702             : {
     703           1 :         int rc = 0;
     704             :         struct intel_log_pages_ctx *ctx;
     705             : 
     706           1 :         ctx = calloc(1, sizeof(*ctx));
     707           1 :         if (!ctx) {
     708           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     709           0 :                                      ctrlr->opts.admin_timeout_ms);
     710           0 :                 return 0;
     711             :         }
     712             : 
     713           1 :         ctx->ctrlr = ctrlr;
     714             : 
     715           1 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
     716           1 :                                               SPDK_NVME_GLOBAL_NS_TAG, &ctx->log_page_directory,
     717             :                                               sizeof(struct spdk_nvme_intel_log_page_directory),
     718             :                                               0, nvme_ctrlr_set_intel_support_log_pages_done, ctx);
     719           1 :         if (rc != 0) {
     720           0 :                 free(ctx);
     721           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     722           0 :                                      ctrlr->opts.admin_timeout_ms);
     723           0 :                 return 0;
     724             :         }
     725             : 
     726           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
     727           1 :                              ctrlr->opts.admin_timeout_ms);
     728             : 
     729           1 :         return 0;
     730             : }
     731             : 
     732             : static int
     733           4 : nvme_ctrlr_alloc_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     734             : {
     735             :         uint32_t ana_log_page_size;
     736             : 
     737           4 :         ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + ctrlr->cdata.nanagrpid *
     738           4 :                             sizeof(struct spdk_nvme_ana_group_descriptor) + ctrlr->active_ns_count *
     739             :                             sizeof(uint32_t);
     740             : 
     741             :         /* Number of active namespaces may have changed.
     742             :          * Check if ANA log page fits into existing buffer.
     743             :          */
     744           4 :         if (ana_log_page_size > ctrlr->ana_log_page_size) {
     745             :                 void *new_buffer;
     746             : 
     747           4 :                 if (ctrlr->ana_log_page) {
     748           1 :                         new_buffer = realloc(ctrlr->ana_log_page, ana_log_page_size);
     749             :                 } else {
     750           3 :                         new_buffer = calloc(1, ana_log_page_size);
     751             :                 }
     752             : 
     753           4 :                 if (!new_buffer) {
     754           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate ANA log page buffer, size %u\n",
     755             :                                           ana_log_page_size);
     756           0 :                         return -ENXIO;
     757             :                 }
     758             : 
     759           4 :                 ctrlr->ana_log_page = new_buffer;
     760           4 :                 if (ctrlr->copied_ana_desc) {
     761           1 :                         new_buffer = realloc(ctrlr->copied_ana_desc, ana_log_page_size);
     762             :                 } else {
     763           3 :                         new_buffer = calloc(1, ana_log_page_size);
     764             :                 }
     765             : 
     766           4 :                 if (!new_buffer) {
     767           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate a buffer to parse ANA descriptor, size %u\n",
     768             :                                           ana_log_page_size);
     769           0 :                         return -ENOMEM;
     770             :                 }
     771             : 
     772           4 :                 ctrlr->copied_ana_desc = new_buffer;
     773           4 :                 ctrlr->ana_log_page_size = ana_log_page_size;
     774             :         }
     775             : 
     776           4 :         return 0;
     777             : }
     778             : 
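/*
 * Editor's note -- a worked instance of the sizing above, using the NVMe
 * spec layout (16-byte ANA log header, 32-byte group descriptor header,
 * 4 bytes per NSID): with cdata.nanagrpid = 2 and active_ns_count = 3,
 *
 *     ana_log_page_size = 16 + 2 * 32 + 3 * 4 = 92 bytes.
 *
 * This is a worst case: it assumes every ANA group appears in the log
 * and every active namespace is listed exactly once.
 */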
     779             : static int
     780           4 : nvme_ctrlr_update_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     781             : {
     782             :         struct nvme_completion_poll_status *status;
     783             :         int rc;
     784             : 
     785           4 :         rc = nvme_ctrlr_alloc_ana_log_page(ctrlr);
     786           4 :         if (rc != 0) {
     787           0 :                 return rc;
     788             :         }
     789             : 
     790           4 :         status = calloc(1, sizeof(*status));
     791           4 :         if (status == NULL) {
     792           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     793           0 :                 return -ENOMEM;
     794             :         }
     795             : 
     796           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
     797           4 :                                               SPDK_NVME_GLOBAL_NS_TAG, ctrlr->ana_log_page,
     798             :                                               ctrlr->ana_log_page_size, 0,
     799             :                                               nvme_completion_poll_cb, status);
     800           4 :         if (rc != 0) {
     801           0 :                 free(status);
     802           0 :                 return rc;
     803             :         }
     804             : 
     805           4 :         if (nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, status, &ctrlr->ctrlr_lock,
     806           4 :                         ctrlr->opts.admin_timeout_ms * 1000)) {
     807           0 :                 if (!status->timed_out) {
     808           0 :                         free(status);
     809             :                 }
     810           0 :                 return -EIO;
     811             :         }
     812             : 
     813           4 :         free(status);
     814           4 :         return 0;
     815             : }
     816             : 
     817             : static int
     818           5 : nvme_ctrlr_update_ns_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
     819             :                                 void *cb_arg)
     820             : {
     821           5 :         struct spdk_nvme_ctrlr *ctrlr = cb_arg;
     822             :         struct spdk_nvme_ns *ns;
     823             :         uint32_t i, nsid;
     824             : 
     825          14 :         for (i = 0; i < desc->num_of_nsid; i++) {
     826           9 :                 nsid = desc->nsid[i];
     827           9 :                 if (nsid == 0 || nsid > ctrlr->cdata.nn) {
     828           0 :                         continue;
     829             :                 }
     830             : 
     831           9 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
     832           9 :                 assert(ns != NULL);
     833             : 
     834           9 :                 ns->ana_group_id = desc->ana_group_id;
     835           9 :                 ns->ana_state = desc->ana_state;
     836             :         }
     837             : 
     838           5 :         return 0;
     839             : }
     840             : 
     841             : int
     842           4 : nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
     843             :                               spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
     844             : {
     845             :         struct spdk_nvme_ana_group_descriptor *copied_desc;
     846             :         uint8_t *orig_desc;
     847             :         uint32_t i, desc_size, copy_len;
     848           4 :         int rc = 0;
     849             : 
     850           4 :         if (ctrlr->ana_log_page == NULL) {
     851           0 :                 return -EINVAL;
     852             :         }
     853             : 
     854           4 :         copied_desc = ctrlr->copied_ana_desc;
     855             : 
     856           4 :         orig_desc = (uint8_t *)ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
     857           4 :         copy_len = ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);
     858             : 
     859           9 :         for (i = 0; i < ctrlr->ana_log_page->num_ana_group_desc; i++) {
     860           5 :                 memcpy(copied_desc, orig_desc, copy_len);
     861             : 
     862           5 :                 rc = cb_fn(copied_desc, cb_arg);
     863           5 :                 if (rc != 0) {
     864           0 :                         break;
     865             :                 }
     866             : 
     867           5 :                 desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
     868           5 :                             copied_desc->num_of_nsid * sizeof(uint32_t);
     869           5 :                 orig_desc += desc_size;
     870           5 :                 copy_len -= desc_size;
     871             :         }
     872             : 
     873           4 :         return rc;
     874             : }
     875             : 
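/*
 * Editor's sketch of a spdk_nvme_parse_ana_log_page_cb: this one just
 * counts the namespaces reported in optimized groups. Returning non-zero
 * stops the iteration, as the loop above shows.
 */
static int
example_count_optimized_ns(const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg)
{
        uint32_t *count = cb_arg;

        if (desc->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE) {
                *count += desc->num_of_nsid;
        }

        return 0;
}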
     876             : static int
     877          16 : nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     878             : {
     879          16 :         int     rc = 0;
     880             : 
     881          16 :         memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
     882             :         /* Mandatory pages */
     883          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
     884          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
     885          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
     886          16 :         if (ctrlr->cdata.lpa.celp) {
     887           1 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
     888             :         }
     889             : 
     890          16 :         if (ctrlr->cdata.cmic.ana_reporting) {
     891           2 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] = true;
     892           2 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
     893           2 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
     894           2 :                         if (rc == 0) {
     895           2 :                                 nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
     896             :                                                               ctrlr);
     897             :                         }
     898             :                 }
     899             :         }
     900             : 
     901          16 :         if (ctrlr->cdata.ctratt.bits.fdps) {
     902           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_CONFIGURATIONS] = true;
     903           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_RECLAIM_UNIT_HANDLE_USAGE] = true;
     904           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_STATISTICS] = true;
     905           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_EVENTS] = true;
     906             :         }
     907             : 
     908          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL &&
     909           1 :             ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
     910           1 :             !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
     911           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
     912           1 :                                      ctrlr->opts.admin_timeout_ms);
     913             : 
     914             :         } else {
     915          15 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     916          15 :                                      ctrlr->opts.admin_timeout_ms);
     917             : 
     918             :         }
     919             : 
     920          16 :         return rc;
     921             : }
     922             : 
     923             : static void
     924           1 : nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     925             : {
     926           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
     927           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
     928           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
     929           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
     930           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
     931           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
     932           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
     933           1 : }
     934             : 
     935             : static void
     936          18 : nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
     937             : {
     938             :         uint32_t cdw11;
     939             :         struct nvme_completion_poll_status *status;
     940             : 
     941          18 :         if (ctrlr->opts.arbitration_burst == 0) {
     942          16 :                 return;
     943             :         }
     944             : 
     945           2 :         if (ctrlr->opts.arbitration_burst > 7) {
     946           1 :                 NVME_CTRLR_WARNLOG(ctrlr, "Valid arbitration burst values is from 0-7\n");
     947           1 :                 return;
     948             :         }
     949             : 
     950           1 :         status = calloc(1, sizeof(*status));
     951           1 :         if (!status) {
     952           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     953           0 :                 return;
     954             :         }
     955             : 
     956           1 :         cdw11 = ctrlr->opts.arbitration_burst;
     957             : 
     958           1 :         if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
     959           1 :                 cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
     960           1 :                 cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
     961           1 :                 cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
     962             :         }
     963             : 
     964           1 :         if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
     965             :                                             cdw11, 0, NULL, 0,
     966             :                                             nvme_completion_poll_cb, status) < 0) {
     967           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set arbitration feature failed\n");
     968           0 :                 free(status);
     969           0 :                 return;
     970             :         }
     971             : 
     972           1 :         if (nvme_wait_for_completion_timeout(ctrlr->adminq, status,
     973           1 :                                              ctrlr->opts.admin_timeout_ms * 1000)) {
      974           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Timed out setting arbitration feature\n");
     975             :         }
     976             : 
     977           1 :         if (!status->timed_out) {
     978           1 :                 free(status);
     979             :         }
     980             : }
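                      :
                      : /*
                      :  * Illustrative sketch (not part of the driver): CDW11 of the Arbitration
                      :  * feature built above packs Arbitration Burst into bits 2:0 and the
                      :  * low/medium/high priority weights into bits 15:8, 23:16 and 31:24.
                      :  * With the assumed values arbitration_burst = 4 and weights 8/16/32:
                      :  *
                      :  *   uint32_t cdw11 = 4;                  // AB: bursts of 2^4 commands
                      :  *   cdw11 |= (uint32_t)8 << 8;           // Low Priority Weight
                      :  *   cdw11 |= (uint32_t)16 << 16;         // Medium Priority Weight
                      :  *   cdw11 |= (uint32_t)32 << 24;         // High Priority Weight
                      :  *   assert(cdw11 == 0x20100804);
                      :  */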
     981             : 
     982             : static void
     983          16 : nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     984             : {
     985          16 :         memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
     986             :         /* Mandatory features */
     987          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
     988          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
     989          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
     990          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
     991          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
     992          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
     993          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
     994          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
     995          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
     996             :         /* Optional features */
     997          16 :         if (ctrlr->cdata.vwc.present) {
     998           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
     999             :         }
    1000          16 :         if (ctrlr->cdata.apsta.supported) {
    1001           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
    1002             :         }
    1003          16 :         if (ctrlr->cdata.hmpre) {
    1004           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
    1005             :         }
    1006          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
    1007           1 :                 nvme_ctrlr_set_intel_supported_features(ctrlr);
    1008             :         }
    1009             : 
    1010          16 :         nvme_ctrlr_set_arbitration_feature(ctrlr);
    1011          16 : }
    1012             : 
    1013             : bool
    1014           0 : spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
    1015             : {
    1016           0 :         return ctrlr->is_failed;
    1017             : }
    1018             : 
    1019             : void
    1020           1 : nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
    1021             : {
    1022             :         /*
     1023             :          * Set the flag here and leave the actual failing of the qpairs to
     1024             :          * spdk_nvme_qpair_process_completions().
    1025             :          */
    1026           1 :         if (hot_remove) {
    1027           0 :                 ctrlr->is_removed = true;
    1028             :         }
    1029             : 
    1030           1 :         if (ctrlr->is_failed) {
    1031           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "already in failed state\n");
    1032           0 :                 return;
    1033             :         }
    1034             : 
    1035           1 :         if (ctrlr->is_disconnecting) {
    1036           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "already disconnecting\n");
    1037           0 :                 return;
    1038             :         }
    1039             : 
    1040           1 :         ctrlr->is_failed = true;
    1041           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1042           1 :         NVME_CTRLR_ERRLOG(ctrlr, "in failed state.\n");
    1043             : }
    1044             : 
    1045             : /**
    1046             :  * This public API function will try to take the controller lock.
    1047             :  * Any private functions being called from a thread already holding
    1048             :  * the ctrlr lock should call nvme_ctrlr_fail directly.
    1049             :  */
    1050             : void
    1051           0 : spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
    1052             : {
    1053           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1054           0 :         nvme_ctrlr_fail(ctrlr, false);
    1055           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1056           0 : }
    1057             : 
    1058             : static void
    1059          38 : nvme_ctrlr_shutdown_set_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1060             : {
    1061          38 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1062          38 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1063             : 
    1064          38 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1065           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1066           0 :                 ctx->shutdown_complete = true;
    1067           0 :                 return;
    1068             :         }
    1069             : 
    1070          38 :         if (ctrlr->opts.no_shn_notification) {
    1071           0 :                 ctx->shutdown_complete = true;
    1072           0 :                 return;
    1073             :         }
    1074             : 
    1075             :         /*
     1076             :          * The NVMe specification defines RTD3E to be the time from
     1077             :          *  setting CC.SHN = 01b until the controller sets CSTS.SHST = 10b.
    1078             :          * If the device doesn't report RTD3 entry latency, or if it
    1079             :          *  reports RTD3 entry latency less than 10 seconds, pick
    1080             :          *  10 seconds as a reasonable amount of time to
    1081             :          *  wait before proceeding.
    1082             :          */
    1083          38 :         NVME_CTRLR_DEBUGLOG(ctrlr, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
    1084          38 :         ctx->shutdown_timeout_ms = SPDK_CEIL_DIV(ctrlr->cdata.rtd3e, 1000);
    1085          38 :         ctx->shutdown_timeout_ms = spdk_max(ctx->shutdown_timeout_ms, 10000);
    1086          38 :         NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown timeout = %" PRIu32 " ms\n", ctx->shutdown_timeout_ms);
    1087             : 
    1088          38 :         ctx->shutdown_start_tsc = spdk_get_ticks();
    1089          38 :         ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1090             : }
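                      :
                      : /*
                      :  * Worked example (illustrative): RTD3E is reported in microseconds, so a
                      :  * controller reporting rtd3e = 8,000,000 us yields
                      :  * SPDK_CEIL_DIV(8000000, 1000) = 8000 ms, which spdk_max() then raises to
                      :  * the 10000 ms floor chosen above; rtd3e = 30,000,000 us would keep its
                      :  * own 30000 ms timeout.
                      :  */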
    1091             : 
    1092             : static void
    1093          38 : nvme_ctrlr_shutdown_get_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1094             : {
    1095          38 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1096          38 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1097             :         union spdk_nvme_cc_register cc;
    1098             :         int rc;
    1099             : 
    1100          38 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1101           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1102           0 :                 ctx->shutdown_complete = true;
    1103           0 :                 return;
    1104             :         }
    1105             : 
    1106          38 :         assert(value <= UINT32_MAX);
    1107          38 :         cc.raw = (uint32_t)value;
    1108             : 
    1109          38 :         if (ctrlr->opts.no_shn_notification) {
     1110           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Disabling SSD without shutdown notification\n");
    1111           0 :                 if (cc.bits.en == 0) {
    1112           0 :                         ctx->shutdown_complete = true;
    1113           0 :                         return;
    1114             :                 }
    1115             : 
    1116           0 :                 cc.bits.en = 0;
    1117             :         } else {
    1118          38 :                 cc.bits.shn = SPDK_NVME_SHN_NORMAL;
    1119             :         }
    1120             : 
    1121          38 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_shutdown_set_cc_done, ctx);
    1122          38 :         if (rc != 0) {
    1123           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1124           0 :                 ctx->shutdown_complete = true;
    1125             :         }
    1126             : }
    1127             : 
    1128             : static void
    1129          46 : nvme_ctrlr_shutdown_async(struct spdk_nvme_ctrlr *ctrlr,
    1130             :                           struct nvme_ctrlr_detach_ctx *ctx)
    1131             : {
    1132             :         int rc;
    1133             : 
    1134          46 :         if (ctrlr->is_removed) {
    1135           0 :                 ctx->shutdown_complete = true;
    1136           0 :                 return;
    1137             :         }
    1138             : 
    1139          46 :         if (ctrlr->adminq == NULL ||
    1140          39 :             ctrlr->adminq->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
    1141           8 :                 NVME_CTRLR_INFOLOG(ctrlr, "Adminq is not connected.\n");
    1142           8 :                 ctx->shutdown_complete = true;
    1143           8 :                 return;
    1144             :         }
    1145             : 
    1146          38 :         ctx->state = NVME_CTRLR_DETACH_SET_CC;
    1147          38 :         rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_shutdown_get_cc_done, ctx);
    1148          38 :         if (rc != 0) {
    1149           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1150           0 :                 ctx->shutdown_complete = true;
    1151             :         }
    1152             : }
    1153             : 
    1154             : static void
    1155          38 : nvme_ctrlr_shutdown_get_csts_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1156             : {
    1157          38 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1158             : 
    1159          38 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1160           0 :                 NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to read the CSTS register\n");
    1161           0 :                 ctx->shutdown_complete = true;
    1162           0 :                 return;
    1163             :         }
    1164             : 
    1165          38 :         assert(value <= UINT32_MAX);
    1166          38 :         ctx->csts.raw = (uint32_t)value;
    1167          38 :         ctx->state = NVME_CTRLR_DETACH_GET_CSTS_DONE;
    1168             : }
    1169             : 
    1170             : static int
    1171          76 : nvme_ctrlr_shutdown_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    1172             :                                struct nvme_ctrlr_detach_ctx *ctx)
    1173             : {
    1174             :         union spdk_nvme_csts_register   csts;
    1175             :         uint32_t                        ms_waited;
    1176             : 
    1177          76 :         switch (ctx->state) {
    1178           0 :         case NVME_CTRLR_DETACH_SET_CC:
    1179             :         case NVME_CTRLR_DETACH_GET_CSTS:
    1180             :                 /* We're still waiting for the register operation to complete */
    1181           0 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    1182           0 :                 return -EAGAIN;
    1183             : 
    1184          38 :         case NVME_CTRLR_DETACH_CHECK_CSTS:
    1185          38 :                 ctx->state = NVME_CTRLR_DETACH_GET_CSTS;
    1186          38 :                 if (nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_shutdown_get_csts_done, ctx)) {
    1187           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    1188           0 :                         return -EIO;
    1189             :                 }
    1190          38 :                 return -EAGAIN;
    1191             : 
    1192          38 :         case NVME_CTRLR_DETACH_GET_CSTS_DONE:
    1193          38 :                 ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1194          38 :                 break;
    1195             : 
    1196           0 :         default:
    1197           0 :                 assert(0 && "Should never happen");
    1198             :                 return -EINVAL;
    1199             :         }
    1200             : 
    1201          38 :         ms_waited = (spdk_get_ticks() - ctx->shutdown_start_tsc) * 1000 / spdk_get_ticks_hz();
    1202          38 :         csts.raw = ctx->csts.raw;
    1203             : 
    1204          38 :         if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
    1205          38 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown complete in %u milliseconds\n", ms_waited);
    1206          38 :                 return 0;
    1207             :         }
    1208             : 
    1209           0 :         if (ms_waited < ctx->shutdown_timeout_ms) {
    1210           0 :                 return -EAGAIN;
    1211             :         }
    1212             : 
    1213           0 :         NVME_CTRLR_ERRLOG(ctrlr, "did not shutdown within %u milliseconds\n",
    1214             :                           ctx->shutdown_timeout_ms);
    1215           0 :         if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
     1216           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "likely due to shutdown handling in the VMware emulated NVMe SSD\n");
    1217             :         }
    1218             : 
    1219           0 :         return 0;
    1220             : }
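                      :
                      : /*
                      :  * Minimal caller sketch (assumed usage, mirroring the detach path): drive
                      :  * the shutdown state machine until it stops returning -EAGAIN.
                      :  *
                      :  *   struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
                      :  *
                      :  *   nvme_ctrlr_shutdown_async(ctrlr, &ctx);
                      :  *   while (!ctx.shutdown_complete &&
                      :  *          nvme_ctrlr_shutdown_poll_async(ctrlr, &ctx) == -EAGAIN) {
                      :  *           ;    // register reads/writes still in flight
                      :  *   }
                      :  */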
    1221             : 
    1222             : static inline uint64_t
    1223         493 : nvme_ctrlr_get_ready_timeout(struct spdk_nvme_ctrlr *ctrlr)
    1224             : {
    1225         493 :         return ctrlr->cap.bits.to * 500;
    1226             : }
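                      :
                      : /*
                      :  * Example (illustrative): CAP.TO is expressed in 500 ms units, so a
                      :  * controller reporting cap.bits.to = 30 gets a 30 * 500 = 15000 ms
                      :  * ready timeout.
                      :  */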
    1227             : 
    1228             : static void
    1229          14 : nvme_ctrlr_set_cc_en_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1230             : {
    1231          14 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    1232             : 
    1233          14 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1234           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to set the CC register\n");
    1235           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1236           0 :                 return;
    1237             :         }
    1238             : 
    1239          14 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    1240             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    1241             : }
    1242             : 
    1243             : static int
    1244          21 : nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
    1245             : {
    1246             :         union spdk_nvme_cc_register     cc;
    1247             :         int                             rc;
    1248             : 
    1249          21 :         rc = nvme_transport_ctrlr_enable(ctrlr);
    1250          21 :         if (rc != 0) {
    1251           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "transport ctrlr_enable failed\n");
    1252           0 :                 return rc;
    1253             :         }
    1254             : 
    1255          21 :         cc.raw = ctrlr->process_init_cc.raw;
    1256          21 :         if (cc.bits.en != 0) {
    1257           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "called with CC.EN = 1\n");
    1258           0 :                 return -EINVAL;
    1259             :         }
    1260             : 
    1261          21 :         cc.bits.en = 1;
    1262          21 :         cc.bits.css = 0;
    1263          21 :         cc.bits.shn = 0;
    1264          21 :         cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
    1265          21 :         cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
    1266             : 
    1267             :         /* Page size is 2 ^ (12 + mps). */
    1268          21 :         cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
    1269             : 
    1270             :         /*
    1271             :          * Since NVMe 1.0, a controller should have at least one bit set in CAP.CSS.
    1272             :          * A controller that does not have any bit set in CAP.CSS is not spec compliant.
    1273             :          * Try to support such a controller regardless.
    1274             :          */
    1275          21 :         if (ctrlr->cap.bits.css == 0) {
    1276          21 :                 NVME_CTRLR_INFOLOG(ctrlr, "Drive reports no command sets supported. Assuming NVM is supported.\n");
    1277          21 :                 ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
    1278             :         }
    1279             : 
    1280             :         /*
    1281             :          * If the user did not explicitly request a command set, or supplied a value larger than
    1282             :          * what can be saved in CC.CSS, use the most reasonable default.
    1283             :          */
    1284          21 :         if (ctrlr->opts.command_set >= CHAR_BIT) {
    1285           0 :                 if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
    1286           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_IOCS;
    1287           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NVM) {
    1288           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1289           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NOIO) {
    1290           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NOIO;
    1291             :                 } else {
    1292             :                         /* Invalid supported bits detected, falling back to NVM. */
    1293           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1294             :                 }
    1295             :         }
    1296             : 
    1297             :         /* Verify that the selected command set is supported by the controller. */
    1298          21 :         if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
    1299           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Requested I/O command set %u but supported mask is 0x%x\n",
    1300             :                                     ctrlr->opts.command_set, ctrlr->cap.bits.css);
    1301           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Falling back to NVM. Assuming NVM is supported.\n");
    1302           0 :                 ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1303             :         }
    1304             : 
    1305          21 :         cc.bits.css = ctrlr->opts.command_set;
    1306             : 
    1307          21 :         switch (ctrlr->opts.arb_mechanism) {
    1308          10 :         case SPDK_NVME_CC_AMS_RR:
    1309          10 :                 break;
    1310           4 :         case SPDK_NVME_CC_AMS_WRR:
    1311           4 :                 if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
    1312           2 :                         break;
    1313             :                 }
    1314           2 :                 return -EINVAL;
    1315           4 :         case SPDK_NVME_CC_AMS_VS:
    1316           4 :                 if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
    1317           2 :                         break;
    1318             :                 }
    1319           2 :                 return -EINVAL;
    1320           3 :         default:
    1321           3 :                 return -EINVAL;
    1322             :         }
    1323             : 
    1324          14 :         cc.bits.ams = ctrlr->opts.arb_mechanism;
    1325          14 :         ctrlr->process_init_cc.raw = cc.raw;
    1326             : 
    1327          14 :         if (nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_set_cc_en_done, ctrlr)) {
    1328           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    1329           0 :                 return -EIO;
    1330             :         }
    1331             : 
    1332          14 :         return 0;
    1333             : }
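                      :
                      : /*
                      :  * Illustrative sketch (not part of the driver) of the CC encoding done
                      :  * above for a typical controller with a 4 KiB page size:
                      :  *
                      :  *   cc.bits.iosqes = 6;                    // 2^6 = 64-byte SQ entries
                      :  *   cc.bits.iocqes = 4;                    // 2^4 = 16-byte CQ entries
                      :  *   cc.bits.mps = spdk_u32log2(4096) - 12; // 12 - 12 = 0 -> 4 KiB pages
                      :  *   cc.bits.css = SPDK_NVME_CC_CSS_NVM;    // NVM command set
                      :  *   cc.bits.ams = SPDK_NVME_CC_AMS_RR;     // round-robin arbitration
                      :  *   cc.bits.en = 1;                        // written via nvme_ctrlr_set_cc_async()
                      :  */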
    1334             : 
    1335             : static const char *
    1336           1 : nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
    1337             : {
    1338           1 :         switch (state) {
    1339           0 :         case NVME_CTRLR_STATE_INIT_DELAY:
    1340           0 :                 return "delay init";
    1341           0 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ:
    1342           0 :                 return "connect adminq";
    1343           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    1344           0 :                 return "wait for connect adminq";
    1345           0 :         case NVME_CTRLR_STATE_READ_VS:
    1346           0 :                 return "read vs";
    1347           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    1348           0 :                 return "read vs wait for vs";
    1349           0 :         case NVME_CTRLR_STATE_READ_CAP:
    1350           0 :                 return "read cap";
    1351           0 :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    1352           0 :                 return "read cap wait for cap";
    1353           0 :         case NVME_CTRLR_STATE_CHECK_EN:
    1354           0 :                 return "check en";
    1355           0 :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    1356           0 :                 return "check en wait for cc";
    1357           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    1358           0 :                 return "disable and wait for CSTS.RDY = 1";
    1359           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1360           0 :                 return "disable and wait for CSTS.RDY = 1 reg";
    1361           0 :         case NVME_CTRLR_STATE_SET_EN_0:
    1362           0 :                 return "set CC.EN = 0";
    1363           0 :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    1364           0 :                 return "set CC.EN = 0 wait for cc";
    1365           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    1366           0 :                 return "disable and wait for CSTS.RDY = 0";
    1367           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    1368           0 :                 return "disable and wait for CSTS.RDY = 0 reg";
    1369           0 :         case NVME_CTRLR_STATE_DISABLED:
    1370           0 :                 return "controller is disabled";
    1371           0 :         case NVME_CTRLR_STATE_ENABLE:
    1372           0 :                 return "enable controller by writing CC.EN = 1";
    1373           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    1374           0 :                 return "enable controller by writing CC.EN = 1 reg";
    1375           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    1376           0 :                 return "wait for CSTS.RDY = 1";
    1377           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1378           0 :                 return "wait for CSTS.RDY = 1 reg";
    1379           0 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    1380           0 :                 return "reset admin queue";
    1381           0 :         case NVME_CTRLR_STATE_IDENTIFY:
    1382           0 :                 return "identify controller";
    1383           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    1384           0 :                 return "wait for identify controller";
    1385           0 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    1386           0 :                 return "configure AER";
    1387           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    1388           0 :                 return "wait for configure aer";
    1389           0 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    1390           0 :                 return "set keep alive timeout";
    1391           0 :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    1392           0 :                 return "wait for set keep alive timeout";
    1393           0 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    1394           0 :                 return "identify controller iocs specific";
    1395           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    1396           0 :                 return "wait for identify controller iocs specific";
    1397           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    1398           0 :                 return "get zns cmd and effects log page";
    1399           0 :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    1400           0 :                 return "wait for get zns cmd and effects log page";
    1401           0 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    1402           0 :                 return "set number of queues";
    1403           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    1404           0 :                 return "wait for set number of queues";
    1405           0 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    1406           0 :                 return "identify active ns";
    1407           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    1408           0 :                 return "wait for identify active ns";
    1409           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    1410           0 :                 return "identify ns";
    1411           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    1412           0 :                 return "wait for identify ns";
    1413           0 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    1414           0 :                 return "identify namespace id descriptors";
    1415           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    1416           0 :                 return "wait for identify namespace id descriptors";
    1417           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    1418           0 :                 return "identify ns iocs specific";
    1419           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    1420           0 :                 return "wait for identify ns iocs specific";
    1421           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    1422           0 :                 return "set supported log pages";
    1423           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    1424           0 :                 return "set supported INTEL log pages";
    1425           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    1426           0 :                 return "wait for supported INTEL log pages";
    1427           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    1428           0 :                 return "set supported features";
    1429           0 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    1430           0 :                 return "set doorbell buffer config";
    1431           0 :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    1432           0 :                 return "wait for doorbell buffer config";
    1433           0 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    1434           0 :                 return "set host ID";
    1435           0 :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    1436           0 :                 return "wait for set host ID";
    1437           0 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    1438           0 :                 return "transport ready";
    1439           0 :         case NVME_CTRLR_STATE_READY:
    1440           0 :                 return "ready";
    1441           1 :         case NVME_CTRLR_STATE_ERROR:
    1442           1 :                 return "error";
    1443           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    1444           0 :                 return "disconnected";
    1445             :         }
    1446           0 :         return "unknown";
     1447             : }
    1448             : 
    1449             : static void
    1450         713 : _nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1451             :                       uint64_t timeout_in_ms, bool quiet)
    1452             : {
    1453             :         uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
    1454             : 
    1455         713 :         ctrlr->state = state;
    1456         713 :         if (timeout_in_ms == NVME_TIMEOUT_KEEP_EXISTING) {
    1457          33 :                 if (!quiet) {
    1458           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (keeping existing timeout)\n",
    1459             :                                             nvme_ctrlr_state_string(ctrlr->state));
    1460             :                 }
    1461          33 :                 return;
    1462             :         }
    1463             : 
    1464         680 :         if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
    1465         678 :                 goto inf;
    1466             :         }
    1467             : 
    1468           2 :         ticks_per_ms = spdk_get_ticks_hz() / 1000;
    1469           2 :         if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
    1470           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1471             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1472           0 :                 goto inf;
    1473             :         }
    1474             : 
    1475           2 :         now_ticks = spdk_get_ticks();
    1476           2 :         timeout_in_ticks = timeout_in_ms * ticks_per_ms;
    1477           2 :         if (timeout_in_ticks > UINT64_MAX - now_ticks) {
    1478           1 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1479             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1480           1 :                 goto inf;
    1481             :         }
    1482             : 
    1483           1 :         ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
    1484           1 :         if (!quiet) {
    1485           1 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (timeout %" PRIu64 " ms)\n",
    1486             :                                     nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
    1487             :         }
    1488           1 :         return;
    1489         679 : inf:
    1490         679 :         if (!quiet) {
    1491         679 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (no timeout)\n",
    1492             :                                     nvme_ctrlr_state_string(ctrlr->state));
    1493             :         }
    1494         679 :         ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
    1495             : }
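                      :
                      : /*
                      :  * Worked example (illustrative): with spdk_get_ticks_hz() = 1,000,000,000
                      :  * (a 1 GHz timestamp counter), ticks_per_ms = 1,000,000, so a 5000 ms
                      :  * timeout becomes 5,000,000,000 ticks added to the current tick count.
                      :  * The two guards above fall back to an infinite timeout whenever either
                      :  * the multiplication or the addition would overflow UINT64_MAX.
                      :  */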
    1496             : 
    1497             : static void
    1498         680 : nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1499             :                      uint64_t timeout_in_ms)
    1500             : {
    1501         680 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, false);
    1502         680 : }
    1503             : 
    1504             : static void
    1505          33 : nvme_ctrlr_set_state_quiet(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1506             :                            uint64_t timeout_in_ms)
    1507             : {
    1508          33 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, true);
    1509          33 : }
    1510             : 
    1511             : static void
    1512          47 : nvme_ctrlr_free_zns_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1513             : {
    1514          47 :         spdk_free(ctrlr->cdata_zns);
    1515          47 :         ctrlr->cdata_zns = NULL;
    1516          47 : }
    1517             : 
    1518             : static void
    1519          47 : nvme_ctrlr_free_iocs_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1520             : {
    1521          47 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    1522          47 : }
    1523             : 
    1524             : static void
    1525          48 : nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
    1526             : {
    1527          48 :         if (ctrlr->shadow_doorbell) {
    1528           1 :                 spdk_free(ctrlr->shadow_doorbell);
    1529           1 :                 ctrlr->shadow_doorbell = NULL;
    1530             :         }
    1531             : 
    1532          48 :         if (ctrlr->eventidx) {
    1533           1 :                 spdk_free(ctrlr->eventidx);
    1534           1 :                 ctrlr->eventidx = NULL;
    1535             :         }
    1536          48 : }
    1537             : 
    1538             : static void
    1539           1 : nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1540             : {
    1541           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1542             : 
    1543           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1544           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Doorbell buffer config failed\n");
    1545             :         } else {
    1546           1 :                 NVME_CTRLR_INFOLOG(ctrlr, "Doorbell buffer config enabled\n");
    1547             :         }
    1548           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1549           1 :                              ctrlr->opts.admin_timeout_ms);
    1550           1 : }
    1551             : 
    1552             : static int
    1553          15 : nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
    1554             : {
    1555          15 :         int rc = 0;
    1556          15 :         uint64_t prp1, prp2, len;
    1557             : 
    1558          15 :         if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
    1559          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1560          14 :                                      ctrlr->opts.admin_timeout_ms);
    1561          14 :                 return 0;
    1562             :         }
    1563             : 
    1564           1 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    1565           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1566           0 :                                      ctrlr->opts.admin_timeout_ms);
    1567           0 :                 return 0;
    1568             :         }
    1569             : 
     1570             :         /* Only one page is needed for each doorbell buffer. */
    1571           1 :         ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1572             :                                               NULL, SPDK_ENV_LCORE_ID_ANY,
    1573             :                                               SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1574           1 :         if (ctrlr->shadow_doorbell == NULL) {
    1575           0 :                 rc = -ENOMEM;
    1576           0 :                 goto error;
    1577             :         }
    1578             : 
    1579           1 :         len = ctrlr->page_size;
    1580           1 :         prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
    1581           1 :         if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1582           0 :                 rc = -EFAULT;
    1583           0 :                 goto error;
    1584             :         }
    1585             : 
    1586           1 :         ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1587             :                                        NULL, SPDK_ENV_LCORE_ID_ANY,
    1588             :                                        SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1589           1 :         if (ctrlr->eventidx == NULL) {
    1590           0 :                 rc = -ENOMEM;
    1591           0 :                 goto error;
    1592             :         }
    1593             : 
    1594           1 :         len = ctrlr->page_size;
    1595           1 :         prp2 = spdk_vtophys(ctrlr->eventidx, &len);
    1596           1 :         if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1597           0 :                 rc = -EFAULT;
    1598           0 :                 goto error;
    1599             :         }
    1600             : 
    1601           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
    1602           1 :                              ctrlr->opts.admin_timeout_ms);
    1603             : 
    1604           1 :         rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
    1605             :                         nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
    1606           1 :         if (rc != 0) {
    1607           0 :                 goto error;
    1608             :         }
    1609             : 
    1610           1 :         return 0;
    1611             : 
    1612           0 : error:
    1613           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1614           0 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1615           0 :         return rc;
    1616             : }
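                      :
                      : /*
                      :  * Background sketch (NVMe behavior summarized here, not driver code): the
                      :  * Doorbell Buffer Config command hands the controller two host pages,
                      :  * PRP1 = shadow doorbell buffer and PRP2 = EventIdx buffer:
                      :  *
                      :  *   cmd.opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;  // 0x7C
                      :  *   cmd.dptr.prp.prp1 = prp1;    // shadow doorbell page
                      :  *   cmd.dptr.prp.prp2 = prp2;    // EventIdx page
                      :  *
                      :  * Afterwards the driver writes doorbell values into the shadow page and
                      :  * only rings the real MMIO doorbell when the new value passes the
                      :  * EventIdx, which makes emulated controllers much cheaper to drive.
                      :  */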
    1617             : 
    1618             : void
    1619          47 : nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
    1620             : {
    1621             :         struct nvme_request     *req, *tmp;
    1622          47 :         struct spdk_nvme_cpl    cpl = {};
    1623             : 
    1624          47 :         cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
    1625          47 :         cpl.status.sct = SPDK_NVME_SCT_GENERIC;
    1626             : 
    1627          47 :         STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
    1628           0 :                 STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
    1629           0 :                 ctrlr->outstanding_aborts++;
    1630             : 
    1631           0 :                 nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
    1632             :         }
    1633          47 : }
    1634             : 
    1635             : static int
    1636           2 : nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1637             : {
    1638           2 :         if (ctrlr->is_resetting || ctrlr->is_removed) {
    1639             :                 /*
    1640             :                  * Controller is already resetting or has been removed. Return
    1641             :                  *  immediately since there is no need to kick off another
    1642             :                  *  reset in these cases.
    1643             :                  */
    1644           1 :                 return ctrlr->is_resetting ? -EBUSY : -ENXIO;
    1645             :         }
    1646             : 
    1647           1 :         ctrlr->is_resetting = true;
    1648           1 :         ctrlr->is_failed = false;
    1649           1 :         ctrlr->is_disconnecting = true;
    1650           1 :         ctrlr->prepare_for_reset = true;
    1651             : 
    1652           1 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting controller\n");
    1653             : 
     1654             :         /* Disable keep-alive; it will be re-enabled as part of the init process. */
    1655           1 :         ctrlr->keep_alive_interval_ticks = 0;
    1656             : 
    1657             :         /* Abort all of the queued abort requests */
    1658           1 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    1659             : 
    1660           1 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    1661             : 
    1662           1 :         ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1663           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1664             : 
    1665           1 :         return 0;
    1666             : }
    1667             : 
    1668             : static void
    1669           1 : nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
    1670             : {
    1671           1 :         assert(ctrlr->is_failed == false);
    1672           1 :         ctrlr->is_disconnecting = false;
    1673             : 
    1674             :         /* Doorbell buffer config is invalid during reset */
    1675           1 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1676             : 
    1677             :         /* I/O Command Set Specific Identify Controller data is invalidated during reset */
    1678           1 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    1679             : 
    1680           1 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    1681             : 
    1682             :         /* Set the state back to DISCONNECTED to cause a full hardware reset. */
    1683           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISCONNECTED, NVME_TIMEOUT_INFINITE);
    1684           1 : }
    1685             : 
    1686             : int
    1687           0 : spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1688             : {
    1689             :         int rc;
    1690             : 
    1691           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1692           0 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1693           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1694             : 
    1695           0 :         return rc;
    1696             : }
    1697             : 
    1698             : void
    1699           1 : spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
    1700             : {
    1701           1 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1702             : 
    1703           1 :         ctrlr->prepare_for_reset = false;
    1704             : 
    1705             :         /* Set the state back to INIT to cause a full hardware reset. */
    1706           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    1707             : 
    1708             :         /* Return without releasing ctrlr_lock. ctrlr_lock will be released when
     1709             :          * spdk_nvme_ctrlr_reconnect_poll_async() returns 0.
    1710             :          */
    1711           1 : }
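                      :
                      : /*
                      :  * Minimal usage sketch (assumed; spdk_nvme_ctrlr_reset() below follows
                      :  * the same pattern): a reset drains the failed admin queue, then polls
                      :  * the reconnect until it stops returning -EAGAIN.
                      :  *
                      :  *   spdk_nvme_ctrlr_disconnect(ctrlr);
                      :  *   while (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {
                      :  *           ;    // wait for the admin qpair to drain
                      :  *   }
                      :  *   spdk_nvme_ctrlr_reconnect_async(ctrlr);
                      :  *   while (spdk_nvme_ctrlr_reconnect_poll_async(ctrlr) == -EAGAIN) {
                      :  *           ;    // reinitialization in progress
                      :  *   }
                      :  */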
    1712             : 
    1713             : int
    1714           0 : nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
    1715             : {
    1716             :         bool async;
    1717             :         int rc;
    1718             : 
    1719           0 :         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc ||
    1720           0 :             spdk_nvme_ctrlr_is_fabrics(ctrlr) || nvme_qpair_is_admin_queue(qpair)) {
    1721           0 :                 assert(false);
    1722             :                 return -EINVAL;
    1723             :         }
    1724             : 
    1725             :         /* Force a synchronous connect. */
    1726           0 :         async = qpair->async;
    1727           0 :         qpair->async = false;
    1728           0 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
    1729           0 :         qpair->async = async;
    1730             : 
    1731           0 :         if (rc != 0) {
    1732           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1733             :         }
    1734             : 
    1735           0 :         return rc;
    1736             : }
    1737             : 
    1738             : /**
    1739             :  * This function will be called when the controller is being reinitialized.
    1740             :  * Note: the ctrlr_lock must be held when calling this function.
    1741             :  */
    1742             : int
    1743          24 : spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
    1744             : {
    1745             :         struct spdk_nvme_ns *ns, *tmp_ns;
    1746             :         struct spdk_nvme_qpair  *qpair;
    1747          24 :         int rc = 0, rc_tmp = 0;
    1748             : 
    1749          24 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1750           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "controller reinitialization failed\n");
    1751           0 :                 rc = -1;
    1752             :         }
    1753          24 :         if (ctrlr->state != NVME_CTRLR_STATE_READY && rc != -1) {
    1754          23 :                 return -EAGAIN;
    1755             :         }
    1756             : 
    1757             :         /*
    1758             :          * For non-fabrics controllers, the memory locations of the transport qpair
    1759             :          * don't change when the controller is reset. They simply need to be
     1760             :          * re-enabled with admin commands to the controller. For fabrics
     1761             :          * controllers, we need to disconnect and reconnect each qpair on its
     1762             :          * own thread, outside the context of the reset.
    1763             :          */
    1764           1 :         if (rc == 0 && !spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    1765             :                 /* Reinitialize qpairs */
    1766           1 :                 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1767             :                         /* Always clear the qid bit here, even for a foreign qpair. We need
    1768             :                          * to make sure another process doesn't get the chance to grab that
    1769             :                          * qid.
    1770             :                          */
    1771           0 :                         assert(spdk_bit_array_get(ctrlr->free_io_qids, qpair->id));
    1772           0 :                         spdk_bit_array_clear(ctrlr->free_io_qids, qpair->id);
    1773           0 :                         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc) {
    1774             :                                 /*
    1775             :                                  * We cannot reinitialize a foreign qpair. The qpair's owning
    1776             :                                  * process will take care of it. Set failure reason to FAILURE_RESET
    1777             :                                  * to ensure that happens.
    1778             :                                  */
    1779           0 :                                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_RESET;
    1780           0 :                                 continue;
    1781             :                         }
    1782           0 :                         rc_tmp = nvme_ctrlr_reinitialize_io_qpair(ctrlr, qpair);
    1783           0 :                         if (rc_tmp != 0) {
    1784           0 :                                 rc = rc_tmp;
    1785             :                         }
    1786             :                 }
    1787             :         }
    1788             : 
    1789             :         /*
     1790             :          * Take this opportunity to remove inactive namespaces, since namespace
     1791             :          * handles can be invalidated during a reset.
    1792             :          */
    1793           5 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    1794           4 :                 if (!ns->active) {
    1795           1 :                         RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    1796           1 :                         spdk_free(ns);
    1797             :                 }
    1798             :         }
    1799             : 
    1800           1 :         if (rc) {
    1801           0 :                 nvme_ctrlr_fail(ctrlr, false);
    1802             :         }
    1803           1 :         ctrlr->is_resetting = false;
    1804             : 
    1805           1 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1806             : 
    1807           1 :         if (!ctrlr->cdata.oaes.ns_attribute_notices) {
    1808             :                 /*
     1809             :                  * If the controller doesn't support ns_attribute_notices and
     1810             :                  * the namespace attributes change (e.g. the number of namespaces),
     1811             :                  * we need to update the system as part of handling the device reset.
    1812             :                  */
    1813           1 :                 nvme_io_msg_ctrlr_update(ctrlr);
    1814             :         }
    1815             : 
    1816           1 :         return rc;
    1817             : }
    1818             : 
    1819             : /*
     1820             :  * For the PCIe transport, spdk_nvme_ctrlr_disconnect() does a Controller Level Reset
     1821             :  * (changing CC.EN from 1 to 0) as the operation that disconnects the admin qpair.
     1822             :  * The following two functions perform a Controller Level Reset. They have
     1823             :  * to be called under the nvme controller's lock.
    1824             :  */
    1825             : void
    1826           1 : nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
    1827             : {
    1828           1 :         assert(ctrlr->is_disconnecting == true);
    1829             : 
    1830           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    1831           1 : }
    1832             : 
    1833             : int
    1834           2 : nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr)
    1835             : {
    1836           2 :         int rc = 0;
    1837             : 
    1838           2 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1839           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to disable controller\n");
    1840           0 :                 rc = -1;
    1841             :         }
    1842             : 
    1843           2 :         if (ctrlr->state != NVME_CTRLR_STATE_DISABLED && rc != -1) {
    1844           1 :                 return -EAGAIN;
    1845             :         }
    1846             : 
    1847           1 :         return rc;
    1848             : }
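                      :
                      : /*
                      :  * Caller sketch (assumed usage, under the controller lock while
                      :  * disconnecting): start the CC.EN 1 -> 0 transition, then poll until the
                      :  * controller reports itself disabled.
                      :  *
                      :  *   nvme_ctrlr_disable(ctrlr);
                      :  *   while (nvme_ctrlr_disable_poll(ctrlr) == -EAGAIN) {
                      :  *           ;    // CSTS.RDY has not dropped to 0 yet
                      :  *   }
                      :  */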
    1849             : 
    1850             : static void
    1851           1 : nvme_ctrlr_fail_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
    1852             : {
    1853             :         struct spdk_nvme_qpair  *qpair;
    1854             : 
    1855           1 :         TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1856           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1857             :         }
    1858           1 : }
    1859             : 
    1860             : int
    1861           2 : spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
    1862             : {
    1863             :         int rc;
    1864             : 
    1865           2 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1866             : 
    1867           2 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1868           2 :         if (rc == 0) {
    1869           1 :                 nvme_ctrlr_fail_io_qpairs(ctrlr);
    1870             :         }
    1871             : 
    1872           2 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1873             : 
    1874           2 :         if (rc != 0) {
    1875           1 :                 if (rc == -EBUSY) {
    1876           1 :                         rc = 0;
    1877             :                 }
    1878           1 :                 return rc;
    1879             :         }
    1880             : 
    1881             :         while (1) {
    1882           1 :                 rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
    1883           1 :                 if (rc == -ENXIO) {
    1884           1 :                         break;
    1885             :                 }
    1886             :         }
    1887             : 
    1888           1 :         spdk_nvme_ctrlr_reconnect_async(ctrlr);
    1889             : 
    1890             :         while (true) {
    1891          24 :                 rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
    1892          24 :                 if (rc != -EAGAIN) {
    1893           1 :                         break;
    1894             :                 }
    1895             :         }
    1896             : 
    1897           1 :         return rc;
    1898             : }
    1899             : 
    1900             : int
    1901           0 : spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
    1902             : {
    1903             :         union spdk_nvme_cap_register cap;
    1904           0 :         int rc = 0;
    1905             : 
    1906           0 :         cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
    1907           0 :         if (cap.bits.nssrs == 0) {
    1908           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "subsystem reset is not supported\n");
    1909           0 :                 return -ENOTSUP;
    1910             :         }
    1911             : 
    1912           0 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
    1913           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1914           0 :         ctrlr->is_resetting = true;
    1915           0 :         rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
    1916           0 :         ctrlr->is_resetting = false;
    1917             : 
    1918           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1919             :         /*
     1920             :          * Unlike a controller reset, no further cleanup is done here. For the PCIe transport,
     1921             :          * a subsystem reset causes a hot remove, and the hot-remove handling does all the necessary ctrlr cleanup.
    1922             :          */
    1923           0 :         return rc;
    1924             : }
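                      :
                      : /*
                      :  * Note (per the NVMe spec): SPDK_NVME_NSSR_VALUE is the ASCII string
                      :  * "NVMe" (0x4E564D65); a write of any other value to the NSSR register
                      :  * does not trigger a subsystem reset.
                      :  */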
    1925             : 
    1926             : int
    1927           4 : spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
    1928             : {
    1929           4 :         int rc = 0;
    1930             : 
    1931           4 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1932             : 
    1933           4 :         if (ctrlr->is_failed == false) {
    1934           1 :                 rc = -EPERM;
    1935           1 :                 goto out;
    1936             :         }
    1937             : 
    1938           3 :         if (trid->trtype != ctrlr->trid.trtype) {
    1939           1 :                 rc = -EINVAL;
    1940           1 :                 goto out;
    1941             :         }
    1942             : 
    1943           2 :         if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
    1944           1 :                 rc = -EINVAL;
    1945           1 :                 goto out;
    1946             :         }
    1947             : 
    1948           1 :         ctrlr->trid = *trid;
    1949             : 
    1950           4 : out:
    1951           4 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1952           4 :         return rc;
    1953             : }
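/*
 * A minimal usage sketch for path failover (hypothetical helper and address):
 * spdk_nvme_ctrlr_set_trid() only succeeds while the controller is failed,
 * and only if the new trid keeps the same trtype and subnqn; effectively,
 * only the address of an existing subsystem may change.
 */
static int
example_failover_to_alternate_path(struct spdk_nvme_ctrlr *ctrlr, const char *subnqn)
{
	struct spdk_nvme_transport_id trid = {};
	int rc;

	/* Hypothetical alternate path to the same subsystem. */
	rc = spdk_nvme_transport_id_parse(&trid,
					  "trtype:TCP adrfam:IPv4 traddr:192.168.0.2 trsvcid:4420");
	if (rc != 0) {
		return rc;
	}
	snprintf(trid.subnqn, sizeof(trid.subnqn), "%s", subnqn);

	return spdk_nvme_ctrlr_set_trid(ctrlr, &trid);
}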
    1954             : 
    1955             : void
    1956           0 : spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
    1957             :                               spdk_nvme_remove_cb remove_cb, void *remove_ctx)
    1958             : {
    1959           0 :         if (!spdk_process_is_primary()) {
    1960           0 :                 return;
    1961             :         }
    1962             : 
    1963           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    1964           0 :         ctrlr->remove_cb = remove_cb;
    1965           0 :         ctrlr->cb_ctx = remove_ctx;
    1966           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    1967             : }
    1968             : 
    1969             : static void
    1970          16 : nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1971             : {
    1972          16 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1973             : 
    1974          16 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1975           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_identify_controller failed!\n");
    1976           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1977           0 :                 return;
    1978             :         }
    1979             : 
    1980             :         /*
    1981             :          * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
    1982             :          *  controller supports.
    1983             :          */
    1984          16 :         ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
    1985          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
    1986          16 :         if (ctrlr->cdata.mdts > 0) {
    1987           0 :                 ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
    1988             :                                                 ctrlr->min_page_size * (1 << ctrlr->cdata.mdts));
    1989           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
    1990             :         }
    1991             : 
    1992          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
    1993          16 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    1994           1 :                 ctrlr->cntlid = ctrlr->cdata.cntlid;
    1995             :         } else {
    1996             :                 /*
    1997             :                  * Fabrics controllers should already have CNTLID from the Connect command.
    1998             :                  *
    1999             :                  * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
    2000             :                  * trust the one from Connect.
    2001             :                  */
    2002          15 :                 if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
    2003           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
    2004             :                                             ctrlr->cdata.cntlid, ctrlr->cntlid);
    2005             :                 }
    2006             :         }
    2007             : 
    2008          16 :         if (ctrlr->cdata.sgls.supported && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2009           0 :                 assert(ctrlr->cdata.sgls.supported != 0x3);
    2010           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
    2011           0 :                 if (ctrlr->cdata.sgls.supported == 0x2) {
    2012           0 :                         ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
    2013             :                 }
    2014             : 
    2015           0 :                 ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
    2016           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_sges %u\n", ctrlr->max_sges);
    2017             :         }
    2018             : 
    2019          16 :         if (ctrlr->cdata.sgls.metadata_address && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2020           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED;
    2021             :         }
    2022             : 
    2023          16 :         if (ctrlr->cdata.oacs.security && !(ctrlr->quirks & NVME_QUIRK_OACS_SECURITY)) {
    2024           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
    2025             :         }
    2026             : 
    2027          16 :         if (ctrlr->cdata.oacs.directives) {
    2028           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED;
    2029             :         }
    2030             : 
    2031          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "fuses compare and write: %d\n",
    2032             :                             ctrlr->cdata.fuses.compare_and_write);
    2033          16 :         if (ctrlr->cdata.fuses.compare_and_write) {
    2034           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
    2035             :         }
    2036             : 
    2037          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
    2038          16 :                              ctrlr->opts.admin_timeout_ms);
    2039             : }
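/*
 * Worked example of the MDTS clamp above (illustrative values, hypothetical
 * helper): MDTS is a power-of-two multiplier of the controller's minimum page
 * size, so with min_page_size = 4096 and cdata.mdts = 5 the controller limit
 * is 4096 * (1 << 5) = 128 KiB and max_xfer_size becomes
 * min(transport limit, 128 KiB); mdts == 0 means no limit is reported.
 */
static uint32_t
example_mdts_limit(uint32_t min_page_size, uint8_t mdts, uint32_t transport_max)
{
	if (mdts == 0) {
		return transport_max;
	}

	return spdk_min(transport_max, min_page_size * (1u << mdts));
}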
    2040             : 
    2041             : static int
    2042          16 : nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
    2043             : {
    2044             :         int     rc;
    2045             : 
    2046          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
    2047          16 :                              ctrlr->opts.admin_timeout_ms);
    2048             : 
    2049          16 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
    2050          16 :                                      &ctrlr->cdata, sizeof(ctrlr->cdata),
    2051             :                                      nvme_ctrlr_identify_done, ctrlr);
    2052          16 :         if (rc != 0) {
    2053           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2054           0 :                 return rc;
    2055             :         }
    2056             : 
    2057          16 :         return 0;
    2058             : }
    2059             : 
    2060             : static void
    2061           0 : nvme_ctrlr_get_zns_cmd_and_effects_log_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2062             : {
    2063             :         struct spdk_nvme_cmds_and_effect_log_page *log_page;
    2064           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    2065             : 
    2066           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2067           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_get_zns_cmd_and_effects_log failed!\n");
    2068           0 :                 spdk_free(ctrlr->tmp_ptr);
    2069           0 :                 ctrlr->tmp_ptr = NULL;
    2070           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2071           0 :                 return;
    2072             :         }
    2073             : 
    2074           0 :         log_page = ctrlr->tmp_ptr;
    2075             : 
    2076           0 :         if (log_page->io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND].csupp) {
    2077           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
    2078             :         }
    2079           0 :         spdk_free(ctrlr->tmp_ptr);
    2080           0 :         ctrlr->tmp_ptr = NULL;
    2081             : 
    2082           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, ctrlr->opts.admin_timeout_ms);
    2083             : }
    2084             : 
    2085             : static int
    2086           0 : nvme_ctrlr_get_zns_cmd_and_effects_log(struct spdk_nvme_ctrlr *ctrlr)
    2087             : {
    2088             :         int rc;
    2089             : 
    2090           0 :         assert(!ctrlr->tmp_ptr);
    2091           0 :         ctrlr->tmp_ptr = spdk_zmalloc(sizeof(struct spdk_nvme_cmds_and_effect_log_page), 64, NULL,
    2092             :                                       SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2093           0 :         if (!ctrlr->tmp_ptr) {
    2094           0 :                 rc = -ENOMEM;
    2095           0 :                 goto error;
    2096             :         }
    2097             : 
    2098           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
    2099           0 :                              ctrlr->opts.admin_timeout_ms);
    2100             : 
    2101           0 :         rc = spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG,
    2102             :                         0, ctrlr->tmp_ptr, sizeof(struct spdk_nvme_cmds_and_effect_log_page),
    2103             :                         0, 0, 0, SPDK_NVME_CSI_ZNS << 24,
    2104             :                         nvme_ctrlr_get_zns_cmd_and_effects_log_done, ctrlr);
    2105           0 :         if (rc != 0) {
    2106           0 :                 goto error;
    2107             :         }
    2108             : 
    2109           0 :         return 0;
    2110             : 
    2111           0 : error:
    2112           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2113           0 :         spdk_free(ctrlr->tmp_ptr);
    2114           0 :         ctrlr->tmp_ptr = NULL;
    2115           0 :         return rc;
    2116             : }
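/*
 * A note on the cdw14 argument above (a hedged reading of the NVMe spec): for
 * Get Log Page, the Command Set Identifier is carried in bits 31:24 of
 * Command Dword 14, hence the SPDK_NVME_CSI_ZNS << 24 shift. Sketch of the
 * encoding (hypothetical helper):
 */
static uint32_t
example_get_log_page_cdw14(uint8_t csi)
{
	return (uint32_t)csi << 24; /* CDW14[31:24] = CSI */
}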
    2117             : 
    2118             : static void
    2119           0 : nvme_ctrlr_identify_zns_specific_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2120             : {
    2121           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2122             : 
    2123           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2124             :                 /* no need to print an error, the controller simply does not support ZNS */
    2125           0 :                 nvme_ctrlr_free_zns_specific_data(ctrlr);
    2126           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2127           0 :                                      ctrlr->opts.admin_timeout_ms);
    2128           0 :                 return;
    2129             :         }
    2130             : 
    2131             :         /* A zero zasl value means use mdts */
    2132           0 :         if (ctrlr->cdata_zns->zasl) {
    2133           0 :                 uint32_t max_append = ctrlr->min_page_size * (1 << ctrlr->cdata_zns->zasl);
    2134           0 :                 ctrlr->max_zone_append_size = spdk_min(ctrlr->max_xfer_size, max_append);
    2135             :         } else {
    2136           0 :                 ctrlr->max_zone_append_size = ctrlr->max_xfer_size;
    2137             :         }
    2138             : 
    2139           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
    2140           0 :                              ctrlr->opts.admin_timeout_ms);
    2141             : }
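/*
 * Worked example of the ZASL computation above (illustrative values,
 * hypothetical helper): like MDTS, ZASL is a power of two in units of the
 * minimum page size. With min_page_size = 4096 and zasl = 3, zone appends are
 * capped at 4096 * (1 << 3) = 32 KiB, further clamped by max_xfer_size;
 * zasl == 0 falls back to the MDTS-derived limit.
 */
static uint32_t
example_zone_append_limit(uint32_t min_page_size, uint8_t zasl, uint32_t max_xfer_size)
{
	if (zasl == 0) {
		return max_xfer_size;
	}

	return spdk_min(max_xfer_size, min_page_size * (1u << zasl));
}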
    2142             : 
    2143             : /**
    2144             :  * This function will try to fetch the I/O Command Specific Controller data structure for
    2145             :  * each I/O Command Set supported by SPDK.
    2146             :  *
    2147             :  * If an I/O Command Set is not supported by the controller, "Invalid Field in Command"
    2148             :  * will be returned. Since we are fetching in an exploratory way, getting an error back
    2149             :  * from the controller should not be treated as fatal.
    2150             :  *
    2151             :  * I/O Command Sets not supported by SPDK will be skipped (e.g. Key Value Command Set).
    2152             :  *
    2153             :  * I/O Command Sets without an IOCS specific data structure (i.e. a zero-filled IOCS specific
    2154             :  * data structure) will be skipped (e.g. NVM Command Set, Key Value Command Set).
    2155             :  */
    2156             : static int
    2157          19 : nvme_ctrlr_identify_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2158             : {
    2159             :         int     rc;
    2160             : 
    2161          19 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2162          19 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2163          19 :                                      ctrlr->opts.admin_timeout_ms);
    2164          19 :                 return 0;
    2165             :         }
    2166             : 
    2167             :         /*
    2168             :          * Since SPDK currently only needs to fetch a single Command Set, keep the code here,
    2169             :          * instead of creating multiple NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC substates,
    2170             :          * which would require additional functions and complexity for no good reason.
    2171             :          */
    2172           0 :         assert(!ctrlr->cdata_zns);
    2173           0 :         ctrlr->cdata_zns = spdk_zmalloc(sizeof(*ctrlr->cdata_zns), 64, NULL, SPDK_ENV_SOCKET_ID_ANY,
    2174             :                                         SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2175           0 :         if (!ctrlr->cdata_zns) {
    2176           0 :                 rc = -ENOMEM;
    2177           0 :                 goto error;
    2178             :         }
    2179             : 
    2180           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
    2181           0 :                              ctrlr->opts.admin_timeout_ms);
    2182             : 
    2183           0 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR_IOCS, 0, 0, SPDK_NVME_CSI_ZNS,
    2184           0 :                                      ctrlr->cdata_zns, sizeof(*ctrlr->cdata_zns),
    2185             :                                      nvme_ctrlr_identify_zns_specific_done, ctrlr);
    2186           0 :         if (rc != 0) {
    2187           0 :                 goto error;
    2188             :         }
    2189             : 
    2190           0 :         return 0;
    2191             : 
    2192           0 : error:
    2193           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2194           0 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    2195           0 :         return rc;
    2196             : }
    2197             : 
    2198             : enum nvme_active_ns_state {
    2199             :         NVME_ACTIVE_NS_STATE_IDLE,
    2200             :         NVME_ACTIVE_NS_STATE_PROCESSING,
    2201             :         NVME_ACTIVE_NS_STATE_DONE,
    2202             :         NVME_ACTIVE_NS_STATE_ERROR
    2203             : };
    2204             : 
    2205             : typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);
    2206             : 
    2207             : struct nvme_active_ns_ctx {
    2208             :         struct spdk_nvme_ctrlr *ctrlr;
    2209             :         uint32_t page_count;
    2210             :         uint32_t next_nsid;
    2211             :         uint32_t *new_ns_list;
    2212             :         nvme_active_ns_ctx_deleter deleter;
    2213             : 
    2214             :         enum nvme_active_ns_state state;
    2215             : };
    2216             : 
    2217             : static struct nvme_active_ns_ctx *
    2218          45 : nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
    2219             : {
    2220             :         struct nvme_active_ns_ctx *ctx;
    2221          45 :         uint32_t *new_ns_list = NULL;
    2222             : 
    2223          45 :         ctx = calloc(1, sizeof(*ctx));
    2224          45 :         if (!ctx) {
    2225           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme_active_ns_ctx!\n");
    2226           0 :                 return NULL;
    2227             :         }
    2228             : 
    2229          45 :         new_ns_list = spdk_zmalloc(sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
    2230             :                                    NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
    2231          45 :         if (!new_ns_list) {
    2232           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate active_ns_list!\n");
    2233           0 :                 free(ctx);
    2234           0 :                 return NULL;
    2235             :         }
    2236             : 
    2237          45 :         ctx->page_count = 1;
    2238          45 :         ctx->new_ns_list = new_ns_list;
    2239          45 :         ctx->ctrlr = ctrlr;
    2240          45 :         ctx->deleter = deleter;
    2241             : 
    2242          45 :         return ctx;
    2243             : }
    2244             : 
    2245             : static void
    2246          45 : nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
    2247             : {
    2248          45 :         spdk_free(ctx->new_ns_list);
    2249          45 :         free(ctx);
    2250          45 : }
    2251             : 
    2252             : static int
    2253       18403 : nvme_ctrlr_destruct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2254             : {
    2255       18403 :         struct spdk_nvme_ns tmp, *ns;
    2256             : 
    2257       18403 :         assert(ctrlr != NULL);
    2258             : 
    2259       18403 :         tmp.id = nsid;
    2260       18403 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    2261       18403 :         if (ns == NULL) {
    2262           0 :                 return -EINVAL;
    2263             :         }
    2264             : 
    2265       18403 :         nvme_ns_destruct(ns);
    2266       18403 :         ns->active = false;
    2267             : 
    2268       18403 :         return 0;
    2269             : }
    2270             : 
    2271             : static int
    2272       12311 : nvme_ctrlr_construct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2273             : {
    2274             :         struct spdk_nvme_ns *ns;
    2275             : 
    2276       12311 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    2277           0 :                 return -EINVAL;
    2278             :         }
    2279             : 
    2280             :         /* Namespaces are constructed on demand, so simply requesting one constructs it. */
    2281       12311 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2282       12311 :         if (ns == NULL) {
    2283           0 :                 return -ENOMEM;
    2284             :         }
    2285             : 
    2286       12311 :         ns->active = true;
    2287             : 
    2288       12311 :         return 0;
    2289             : }
    2290             : 
    2291             : static void
    2292          44 : nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t *new_ns_list,
    2293             :                                    size_t max_entries)
    2294             : {
    2295          44 :         uint32_t active_ns_count = 0;
    2296             :         size_t i;
    2297             :         uint32_t nsid;
    2298             :         struct spdk_nvme_ns *ns, *tmp_ns;
    2299             :         int rc;
    2300             : 
    2301             :         /* First, remove namespaces that no longer exist */
    2302       15387 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    2303       15343 :                 nsid = new_ns_list[0];
    2304       15343 :                 active_ns_count = 0;
    2305     3547429 :                 while (nsid != 0) {
    2306     3536712 :                         if (nsid == ns->id) {
    2307        4626 :                                 break;
    2308             :                         }
    2309             : 
    2310     3532086 :                         nsid = new_ns_list[active_ns_count++];
    2311             :                 }
    2312             : 
    2313       15343 :                 if (nsid != ns->id) {
    2314             :                         /* Did not find this namespace id in the new list. */
    2315       10717 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was removed\n", ns->id);
    2316       10717 :                         nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    2317             :                 }
    2318             :         }
    2319             : 
    2320             :         /* Next, add new namespaces */
    2321          44 :         active_ns_count = 0;
    2322       12355 :         for (i = 0; i < max_entries; i++) {
    2323       12355 :                 nsid = new_ns_list[active_ns_count];
    2324             : 
    2325       12355 :                 if (nsid == 0) {
    2326          44 :                         break;
    2327             :                 }
    2328             : 
    2329             :                 /* If the namespace already exists, this will not construct it a second time. */
    2330       12311 :                 rc = nvme_ctrlr_construct_namespace(ctrlr, nsid);
    2331       12311 :                 if (rc != 0) {
    2332             :                         /* We can't easily handle a failure here. But just move on. */
    2333           0 :                         assert(false);
    2334             :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to allocate a namespace object.\n");
    2335             :                         continue;
    2336             :                 }
    2337             : 
    2338       12311 :                 active_ns_count++;
    2339             :         }
    2340             : 
    2341          44 :         ctrlr->active_ns_count = active_ns_count;
    2342          44 : }
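/*
 * A condensed sketch of the membership test used by the first phase above
 * (hypothetical helper), assuming new_ns_list is zero-terminated: phase one
 * destructs every existing namespace whose id is absent from the new list,
 * then phase two constructs (or re-marks active) every id the new list holds.
 */
static bool
example_nsid_in_list(const uint32_t *zero_terminated_list, uint32_t nsid)
{
	size_t i;

	for (i = 0; zero_terminated_list[i] != 0; i++) {
		if (zero_terminated_list[i] == nsid) {
			return true;
		}
	}

	return false;
}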
    2343             : 
    2344             : static void
    2345          30 : nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2346             : {
    2347          30 :         struct nvme_active_ns_ctx *ctx = arg;
    2348          30 :         uint32_t *new_ns_list = NULL;
    2349             : 
    2350          30 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2351           1 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2352           1 :                 goto out;
    2353             :         }
    2354             : 
    2355          29 :         ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page_count - 1];
    2356          29 :         if (ctx->next_nsid == 0) {
    2357          24 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2358          24 :                 goto out;
    2359             :         }
    2360             : 
    2361           5 :         ctx->page_count++;
    2362           5 :         new_ns_list = spdk_realloc(ctx->new_ns_list,
    2363           5 :                                    ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2364           5 :                                    ctx->ctrlr->page_size);
    2365           5 :         if (!new_ns_list) {
    2366           0 :                 SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2367           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2368           0 :                 goto out;
    2369             :         }
    2370             : 
    2371           5 :         ctx->new_ns_list = new_ns_list;
    2372           5 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2373           5 :         return;
    2374             : 
    2375          25 : out:
    2376          25 :         if (ctx->deleter) {
    2377           9 :                 ctx->deleter(ctx);
    2378             :         }
    2379             : }
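/*
 * A sketch of the pagination rule applied above (hypothetical helper),
 * assuming sizeof(struct spdk_nvme_ns_list) == 4096, i.e. 1024 32-bit NSIDs
 * per Identify page: if the last NSID of the newest page is nonzero, the list
 * may continue, and the next Identify is issued with that NSID as the floor
 * (next_nsid).
 */
static bool
example_active_ns_list_has_more(const uint32_t *ns_list, uint32_t page_count)
{
	return ns_list[1024 * page_count - 1] != 0;
}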
    2380             : 
    2381             : static void
    2382          50 : nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
    2383             : {
    2384          50 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2385             :         uint32_t i;
    2386             :         int rc;
    2387             : 
    2388          50 :         if (ctrlr->cdata.nn == 0) {
    2389          16 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2390          16 :                 goto out;
    2391             :         }
    2392             : 
    2393          34 :         assert(ctx->new_ns_list != NULL);
    2394             : 
    2395             :         /*
    2396             :          * If the controller doesn't support the active ns list (CNS 0x02),
    2397             :          * dummy one up, i.e. report all namespaces as active.
    2398             :          */
    2399          34 :         if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS) {
    2400             :                 uint32_t *new_ns_list;
    2401             : 
    2402             :                 /*
    2403             :                  * The active NS list must always end with a zero element,
    2404             :                  * so we allocate room for cdata.nn+1 entries.
    2405             :                  */
    2406           4 :                 ctx->page_count = spdk_divide_round_up(ctrlr->cdata.nn + 1,
    2407             :                                                        sizeof(struct spdk_nvme_ns_list) / sizeof(new_ns_list[0]));
    2408           4 :                 new_ns_list = spdk_realloc(ctx->new_ns_list,
    2409           4 :                                            ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2410           4 :                                            ctx->ctrlr->page_size);
    2411           4 :                 if (!new_ns_list) {
    2412           0 :                         SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2413           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2414           0 :                         goto out;
    2415             :                 }
    2416             : 
    2417           4 :                 ctx->new_ns_list = new_ns_list;
    2418           4 :                 ctx->new_ns_list[ctrlr->cdata.nn] = 0;
    2419        4091 :                 for (i = 0; i < ctrlr->cdata.nn; i++) {
    2420        4087 :                         ctx->new_ns_list[i] = i + 1;
    2421             :                 }
    2422             : 
    2423           4 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2424           4 :                 goto out;
    2425             :         }
    2426             : 
    2427          30 :         ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
    2428          30 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid, 0,
    2429          30 :                                      &ctx->new_ns_list[1024 * (ctx->page_count - 1)], sizeof(struct spdk_nvme_ns_list),
    2430             :                                      nvme_ctrlr_identify_active_ns_async_done, ctx);
    2431          30 :         if (rc != 0) {
    2432           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2433           0 :                 goto out;
    2434             :         }
    2435             : 
    2436          30 :         return;
    2437             : 
    2438          20 : out:
    2439          20 :         if (ctx->deleter) {
    2440          15 :                 ctx->deleter(ctx);
    2441             :         }
    2442             : }
    2443             : 
    2444             : static void
    2445          24 : _nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
    2446             : {
    2447          24 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2448             :         struct spdk_nvme_ns *ns;
    2449             : 
    2450          24 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2451           0 :                 nvme_active_ns_ctx_destroy(ctx);
    2452           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2453           0 :                 return;
    2454             :         }
    2455             : 
    2456          24 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2457             : 
    2458          28 :         RB_FOREACH(ns, nvme_ns_tree, &ctrlr->ns) {
    2459           4 :                 nvme_ns_free_iocs_specific_data(ns);
    2460             :         }
    2461             : 
    2462          24 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2463          24 :         nvme_active_ns_ctx_destroy(ctx);
    2464          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
    2465             : }
    2466             : 
    2467             : static void
    2468          24 : _nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2469             : {
    2470             :         struct nvme_active_ns_ctx *ctx;
    2471             : 
    2472          24 :         ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
    2473          24 :         if (!ctx) {
    2474           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2475           0 :                 return;
    2476             :         }
    2477             : 
    2478          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
    2479          24 :                              ctrlr->opts.admin_timeout_ms);
    2480          24 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2481             : }
    2482             : 
    2483             : int
    2484          21 : nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2485             : {
    2486             :         struct nvme_active_ns_ctx *ctx;
    2487             :         int rc;
    2488             : 
    2489          21 :         ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
    2490          21 :         if (!ctx) {
    2491           0 :                 return -ENOMEM;
    2492             :         }
    2493             : 
    2494          21 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2495          21 :         while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
    2496           0 :                 rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    2497           0 :                 if (rc < 0) {
    2498           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2499           0 :                         break;
    2500             :                 }
    2501             :         }
    2502             : 
    2503          21 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2504           1 :                 nvme_active_ns_ctx_destroy(ctx);
    2505           1 :                 return -ENXIO;
    2506             :         }
    2507             : 
    2508          20 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2509          20 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2510          20 :         nvme_active_ns_ctx_destroy(ctx);
    2511             : 
    2512          20 :         return 0;
    2513             : }
    2514             : 
    2515             : static void
    2516          21 : nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2517             : {
    2518          21 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2519          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2520             :         uint32_t nsid;
    2521             :         int rc;
    2522             : 
    2523          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2524           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2525           0 :                 return;
    2526             :         }
    2527             : 
    2528          21 :         nvme_ns_set_identify_data(ns);
    2529             : 
    2530             :         /* move on to the next active NS */
    2531          21 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2532          21 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2533          21 :         if (ns == NULL) {
    2534           6 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2535           6 :                                      ctrlr->opts.admin_timeout_ms);
    2536           6 :                 return;
    2537             :         }
    2538          15 :         ns->ctrlr = ctrlr;
    2539          15 :         ns->id = nsid;
    2540             : 
    2541          15 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2542          15 :         if (rc) {
    2543           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2544             :         }
    2545             : }
    2546             : 
    2547             : static int
    2548          21 : nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
    2549             : {
    2550          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2551             :         struct spdk_nvme_ns_data *nsdata;
    2552             : 
    2553          21 :         nsdata = &ns->nsdata;
    2554             : 
    2555          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
    2556          21 :                              ctrlr->opts.admin_timeout_ms);
    2557          21 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
    2558             :                                        nsdata, sizeof(*nsdata),
    2559             :                                        nvme_ctrlr_identify_ns_async_done, ns);
    2560             : }
    2561             : 
    2562             : static int
    2563          14 : nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2564             : {
    2565             :         uint32_t nsid;
    2566             :         struct spdk_nvme_ns *ns;
    2567             :         int rc;
    2568             : 
    2569          14 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2570          14 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2571          14 :         if (ns == NULL) {
    2572             :                 /* No active NS, move on to the next state */
    2573           8 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2574           8 :                                      ctrlr->opts.admin_timeout_ms);
    2575           8 :                 return 0;
    2576             :         }
    2577             : 
    2578           6 :         ns->ctrlr = ctrlr;
    2579           6 :         ns->id = nsid;
    2580             : 
    2581           6 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2582           6 :         if (rc) {
    2583           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2584             :         }
    2585             : 
    2586           6 :         return rc;
    2587             : }
    2588             : 
    2589             : static int
    2590           4 : nvme_ctrlr_identify_namespaces_iocs_specific_next(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    2591             : {
    2592             :         uint32_t nsid;
    2593             :         struct spdk_nvme_ns *ns;
    2594             :         int rc;
    2595             : 
    2596           4 :         if (!prev_nsid) {
    2597           2 :                 nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2598             :         } else {
    2599             :                 /* move on to the next active NS */
    2600           2 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, prev_nsid);
    2601             :         }
    2602             : 
    2603           4 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2604           4 :         if (ns == NULL) {
    2605             :                 /* No first/next active NS, move on to the next state */
    2606           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2607           1 :                                      ctrlr->opts.admin_timeout_ms);
    2608           1 :                 return 0;
    2609             :         }
    2610             : 
    2611             :         /* loop until we find an ns that has (supported) iocs specific data */
    2612          10 :         while (!nvme_ns_has_supported_iocs_specific_data(ns)) {
    2613           8 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2614           8 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2615           8 :                 if (ns == NULL) {
    2616             :                         /* no namespace with (supported) iocs specific data found */
    2617           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2618           1 :                                              ctrlr->opts.admin_timeout_ms);
    2619           1 :                         return 0;
    2620             :                 }
    2621             :         }
    2622             : 
    2623           2 :         rc = nvme_ctrlr_identify_ns_iocs_specific_async(ns);
    2624           2 :         if (rc) {
    2625           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2626             :         }
    2627             : 
    2628           2 :         return rc;
    2629             : }
    2630             : 
    2631             : static void
    2632           0 : nvme_ctrlr_identify_ns_zns_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2633             : {
    2634           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2635           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2636             : 
    2637           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2638           0 :                 nvme_ns_free_zns_specific_data(ns);
    2639           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2640           0 :                 return;
    2641             :         }
    2642             : 
    2643           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2644             : }
    2645             : 
    2646             : static int
    2647           2 : nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns)
    2648             : {
    2649           2 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2650             :         int rc;
    2651             : 
    2652           2 :         switch (ns->csi) {
    2653           2 :         case SPDK_NVME_CSI_ZNS:
    2654           2 :                 break;
    2655           0 :         default:
    2656             :                 /*
    2657             :                  * This switch must handle all cases for which
    2658             :                  * nvme_ns_has_supported_iocs_specific_data() returns true,
    2659             :                  * other cases should never happen.
    2660             :                  */
    2661           0 :                 assert(0);
    2662             :         }
    2663             : 
    2664           2 :         assert(!ns->nsdata_zns);
    2665           2 :         ns->nsdata_zns = spdk_zmalloc(sizeof(*ns->nsdata_zns), 64, NULL, SPDK_ENV_SOCKET_ID_ANY,
    2666             :                                       SPDK_MALLOC_SHARE);
    2667           2 :         if (!ns->nsdata_zns) {
    2668           0 :                 return -ENOMEM;
    2669             :         }
    2670             : 
    2671           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2672           2 :                              ctrlr->opts.admin_timeout_ms);
    2673           2 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2674           2 :                                      ns->nsdata_zns, sizeof(*ns->nsdata_zns),
    2675             :                                      nvme_ctrlr_identify_ns_zns_specific_async_done, ns);
    2676           2 :         if (rc) {
    2677           1 :                 nvme_ns_free_zns_specific_data(ns);
    2678             :         }
    2679             : 
    2680           2 :         return rc;
    2681             : }
    2682             : 
    2683             : static int
    2684          14 : nvme_ctrlr_identify_namespaces_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2685             : {
    2686          14 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2687             :                 /* Multi IOCS not supported/enabled, move on to the next state */
    2688          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2689          14 :                                      ctrlr->opts.admin_timeout_ms);
    2690          14 :                 return 0;
    2691             :         }
    2692             : 
    2693           0 :         return nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, 0);
    2694             : }
    2695             : 
    2696             : static void
    2697           6 : nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2698             : {
    2699           6 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2700           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2701             :         uint32_t nsid;
    2702             :         int rc;
    2703             : 
    2704           6 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2705             :                 /*
    2706             :                  * Many controllers claim to be compatible with NVMe 1.3; however,
    2707             :                  * they do not implement NS ID Desc List. Therefore, instead of setting
    2708             :                  * the state to NVME_CTRLR_STATE_ERROR, silently ignore the completion
    2709             :                  * error and move on to the next state.
    2710             :                  *
    2711             :                  * The proper way is to create a new quirk for controllers that violate
    2712             :                  * the NVMe 1.3 spec by not supporting NS ID Desc List.
    2713             :                  * (Re-using the NVME_QUIRK_IDENTIFY_CNS quirk is not possible, since
    2714             :                  * it is too generic and was added in order to handle controllers that
    2715             :                  * violate the NVMe 1.1 spec by not supporting ACTIVE LIST).
    2716             :                  */
    2717           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2718           0 :                                      ctrlr->opts.admin_timeout_ms);
    2719           0 :                 return;
    2720             :         }
    2721             : 
    2722           6 :         nvme_ns_set_id_desc_list_data(ns);
    2723             : 
    2724             :         /* move on to the next active NS */
    2725           6 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2726           6 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2727           6 :         if (ns == NULL) {
    2728           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2729           2 :                                      ctrlr->opts.admin_timeout_ms);
    2730           2 :                 return;
    2731             :         }
    2732             : 
    2733           4 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2734           4 :         if (rc) {
    2735           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2736             :         }
    2737             : }
    2738             : 
    2739             : static int
    2740           6 : nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
    2741             : {
    2742           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2743             : 
    2744           6 :         memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
    2745             : 
    2746           6 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
    2747           6 :                              ctrlr->opts.admin_timeout_ms);
    2748          12 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
    2749           6 :                                        0, ns->id, 0, ns->id_desc_list, sizeof(ns->id_desc_list),
    2750             :                                        nvme_ctrlr_identify_id_desc_async_done, ns);
    2751             : }
    2752             : 
    2753             : static int
    2754          14 : nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2755             : {
    2756             :         uint32_t nsid;
    2757             :         struct spdk_nvme_ns *ns;
    2758             :         int rc;
    2759             : 
    2760          14 :         if ((ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
    2761          12 :              !(ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
    2762           2 :             (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
    2763          12 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
    2764             :                 /* NS ID Desc List not supported, move on to the next state */
    2765          12 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2766          12 :                                      ctrlr->opts.admin_timeout_ms);
    2767          12 :                 return 0;
    2768             :         }
    2769             : 
    2770           2 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2771           2 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2772           2 :         if (ns == NULL) {
    2773             :                 /* No active NS, move on to the next state */
    2774           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2775           0 :                                      ctrlr->opts.admin_timeout_ms);
    2776           0 :                 return 0;
    2777             :         }
    2778             : 
    2779           2 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2780           2 :         if (rc) {
    2781           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2782             :         }
    2783             : 
    2784           2 :         return rc;
    2785             : }
    2786             : 
    2787             : static void
    2788          19 : nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
    2789             : {
    2790          19 :         if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    2791           4 :                 if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
    2792           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Incorrect IOCCSZ %u, the minimum value should be 4\n",
    2793             :                                           ctrlr->cdata.nvmf_specific.ioccsz);
    2794           0 :                         ctrlr->cdata.nvmf_specific.ioccsz = 4;
    2795           0 :                         assert(0);
    2796             :                 }
    2797           4 :                 ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
    2798           4 :                 ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
    2799             :         }
    2800          19 : }
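/*
 * Worked example of the IOCCSZ math above (illustrative value, hypothetical
 * helper): IOCCSZ is expressed in 16-byte units and includes the 64-byte
 * command itself, so ioccsz = 8 leaves 8 * 16 - 64 = 64 bytes of in-capsule
 * data, and the minimum legal value of 4 leaves 0 bytes.
 */
static uint32_t
example_in_capsule_data_bytes(uint32_t ioccsz)
{
	return ioccsz * 16 - (uint32_t)sizeof(struct spdk_nvme_cmd); /* cmd is 64 bytes */
}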
    2801             : 
    2802             : static void
    2803          19 : nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2804             : {
    2805             :         uint32_t cq_allocated, sq_allocated, min_allocated, i;
    2806          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2807             : 
    2808          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2809           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Number of Queues failed!\n");
    2810           0 :                 ctrlr->opts.num_io_queues = 0;
    2811             :         } else {
    2812             :                 /*
    2813             :                  * Data in cdw0 is 0-based.
    2814             :                  * Lower 16-bits indicate number of submission queues allocated.
    2815             :                  * Upper 16-bits indicate number of completion queues allocated.
    2816             :                  */
    2817          19 :                 sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
    2818          19 :                 cq_allocated = (cpl->cdw0 >> 16) + 1;
    2819             : 
    2820             :                 /*
    2821             :                  * For 1:1 queue mapping, set number of allocated queues to be minimum of
    2822             :                  * submission and completion queues.
    2823             :                  */
    2824          19 :                 min_allocated = spdk_min(sq_allocated, cq_allocated);
    2825             : 
    2826             :                 /* Set number of queues to be minimum of requested and actually allocated. */
    2827          19 :                 ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
    2828             :         }
    2829             : 
    2830          19 :         ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
    2831          19 :         if (ctrlr->free_io_qids == NULL) {
    2832           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2833           0 :                 return;
    2834             :         }
    2835             : 
    2836             :         /* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
    2837          69 :         for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
    2838          50 :                 spdk_nvme_ctrlr_free_qid(ctrlr, i);
    2839             :         }
    2840             : 
    2841          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
    2842          19 :                              ctrlr->opts.admin_timeout_ms);
    2843             : }
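/*
 * Worked example of the cdw0 decode above (illustrative value, hypothetical
 * helper): both fields are 0-based, so cdw0 = 0x001F001F means 0x1F + 1 = 32
 * submission queues and 32 completion queues were allocated, and the usable
 * 1:1-mapped count is min(32, 32) = 32.
 */
static uint32_t
example_decode_num_queues(uint32_t cdw0)
{
	uint32_t sq_allocated = (cdw0 & 0xFFFF) + 1;
	uint32_t cq_allocated = (cdw0 >> 16) + 1;

	return spdk_min(sq_allocated, cq_allocated);
}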
    2844             : 
    2845             : static int
    2846          19 : nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
    2847             : {
    2848             :         int rc;
    2849             : 
    2850          19 :         if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
    2851           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Limiting requested num_io_queues %u to max %d\n",
    2852             :                                      ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
    2853           0 :                 ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
    2854          19 :         } else if (ctrlr->opts.num_io_queues < 1) {
    2855          13 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Requested num_io_queues 0, increasing to 1\n");
    2856          13 :                 ctrlr->opts.num_io_queues = 1;
    2857             :         }
    2858             : 
    2859          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
    2860          19 :                              ctrlr->opts.admin_timeout_ms);
    2861             : 
    2862          19 :         rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
    2863             :                                            nvme_ctrlr_set_num_queues_done, ctrlr);
    2864          19 :         if (rc != 0) {
    2865           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2866           0 :                 return rc;
    2867             :         }
    2868             : 
    2869          19 :         return 0;
    2870             : }
    2871             : 
    2872             : static void
    2873           3 : nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2874             : {
    2875             :         uint32_t keep_alive_interval_us;
    2876           3 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2877             : 
    2878           3 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2879           2 :                 if ((cpl->status.sct == SPDK_NVME_SCT_GENERIC) &&
    2880           2 :                     (cpl->status.sc == SPDK_NVME_SC_INVALID_FIELD)) {
    2881           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Keep alive timeout Get Feature is not supported\n");
    2882             :                 } else {
    2883           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
    2884             :                                           cpl->status.sc, cpl->status.sct);
    2885           1 :                         ctrlr->opts.keep_alive_timeout_ms = 0;
    2886           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2887           1 :                         return;
    2888             :                 }
    2889             :         } else {
    2890           1 :                 if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
    2891           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Controller adjusted keep alive timeout to %u ms\n",
    2892             :                                             cpl->cdw0);
    2893             :                 }
    2894             : 
    2895           1 :                 ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
    2896             :         }
    2897             : 
    2898           2 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    2899           0 :                 ctrlr->keep_alive_interval_ticks = 0;
    2900             :         } else {
    2901           2 :                 keep_alive_interval_us = ctrlr->opts.keep_alive_timeout_ms * 1000 / 2;
    2902             : 
    2903           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Sending keep alive every %u us\n", keep_alive_interval_us);
    2904             : 
    2905           2 :                 ctrlr->keep_alive_interval_ticks = (keep_alive_interval_us * spdk_get_ticks_hz()) /
    2906             :                                                    UINT64_C(1000000);
    2907             : 
    2908             :                 /* Schedule the first Keep Alive to be sent as soon as possible. */
    2909           2 :                 ctrlr->next_keep_alive_tick = spdk_get_ticks();
    2910             :         }
    2911             : 
    2912           2 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    2913           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    2914             :         } else {
    2915           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    2916           2 :                                      ctrlr->opts.admin_timeout_ms);
    2917             :         }
    2918             : }
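                     : 
                     : /*
                     :  * Illustrative arithmetic for the interval computed above, assuming a
                     :  * negotiated keep_alive_timeout_ms of 10000 and a 3 GHz tick rate (both
                     :  * hypothetical values):
                     :  *
                     :  *   keep_alive_interval_us    = 10000 * 1000 / 2 = 5000000 us
                     :  *   keep_alive_interval_ticks = 5000000 * 3000000000 / 1000000
                     :  *                             = 15000000000 ticks (one keep alive every 5 s)
                     :  *
                     :  * Halving the timeout keeps a keep alive in flight well before the
                     :  * controller's deadline expires.
                     :  */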
    2919             : 
    2920             : static int
    2921          22 : nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
    2922             : {
    2923             :         int rc;
    2924             : 
    2925          22 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    2926          19 :                 if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    2927           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    2928             :                 } else {
    2929          19 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    2930          19 :                                              ctrlr->opts.admin_timeout_ms);
    2931             :                 }
    2932          19 :                 return 0;
    2933             :         }
    2934             : 
    2935             :         /* Note: Discovery controller identify data does not populate KAS according to spec. */
    2936           3 :         if (!spdk_nvme_ctrlr_is_discovery(ctrlr) && ctrlr->cdata.kas == 0) {
    2937           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Controller KAS is 0 - not enabling Keep Alive\n");
    2938           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    2939           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    2940           0 :                                      ctrlr->opts.admin_timeout_ms);
    2941           0 :                 return 0;
    2942             :         }
    2943             : 
    2944           3 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
    2945           3 :                              ctrlr->opts.admin_timeout_ms);
    2946             : 
    2947             :         /* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
    2948           3 :         rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
    2949             :                                              nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
    2950           3 :         if (rc != 0) {
    2951           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: %d\n", rc);
    2952           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    2953           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2954           0 :                 return rc;
    2955             :         }
    2956             : 
    2957           3 :         return 0;
    2958             : }
    2959             : 
    2960             : static void
    2961           0 : nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2962             : {
    2963           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2964             : 
    2965           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2966             :                 /*
    2967             :                  * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
    2968             :                  * is optional.
    2969             :                  */
    2970           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
    2971             :                                    cpl->status.sc, cpl->status.sct);
    2972             :         } else {
    2973           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Set Features - Host ID was successful\n");
    2974             :         }
    2975             : 
    2976           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    2977           0 : }
    2978             : 
    2979             : static int
    2980          14 : nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
    2981             : {
    2982             :         uint8_t *host_id;
    2983             :         uint32_t host_id_size;
    2984             :         int rc;
    2985             : 
    2986          14 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    2987             :                 /*
    2988             :                  * NVMe-oF sends the host ID during Connect and doesn't allow
    2989             :                  * Set Features - Host Identifier after Connect, so we don't need to do anything here.
    2990             :                  */
    2991          14 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "NVMe-oF transport - not sending Set Features - Host ID\n");
    2992          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    2993          14 :                 return 0;
    2994             :         }
    2995             : 
    2996           0 :         if (ctrlr->cdata.ctratt.bits.host_id_exhid_supported) {
    2997           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 128-bit extended host identifier\n");
    2998           0 :                 host_id = ctrlr->opts.extended_host_id;
    2999           0 :                 host_id_size = sizeof(ctrlr->opts.extended_host_id);
    3000             :         } else {
    3001           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 64-bit host identifier\n");
    3002           0 :                 host_id = ctrlr->opts.host_id;
    3003           0 :                 host_id_size = sizeof(ctrlr->opts.host_id);
    3004             :         }
    3005             : 
    3006             :         /* If the user specified an all-zeroes host identifier, don't send the command. */
    3007           0 :         if (spdk_mem_all_zero(host_id, host_id_size)) {
    3008           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "User did not specify host ID - not sending Set Features - Host ID\n");
    3009           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3010           0 :                 return 0;
    3011             :         }
    3012             : 
    3013           0 :         SPDK_LOGDUMP(nvme, "host_id", host_id, host_id_size);
    3014             : 
    3015           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
    3016           0 :                              ctrlr->opts.admin_timeout_ms);
    3017             : 
    3018           0 :         rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
    3019           0 :         if (rc != 0) {
    3020           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Host ID failed: %d\n", rc);
    3021           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3022           0 :                 return rc;
    3023             :         }
    3024             : 
    3025           0 :         return 0;
    3026             : }
    3027             : 
    3028             : void
    3029           4 : nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    3030             : {
    3031             :         uint32_t nsid;
    3032             :         struct spdk_nvme_ns *ns;
    3033             : 
    3034          19 :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    3035          15 :              nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
    3036          15 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    3037          15 :                 nvme_ns_construct(ns, nsid, ctrlr);
    3038             :         }
    3039           4 : }
    3040             : 
    3041             : static int
    3042           4 : nvme_ctrlr_clear_changed_ns_log(struct spdk_nvme_ctrlr *ctrlr)
    3043             : {
    3044             :         struct nvme_completion_poll_status      *status;
    3045           4 :         int             rc = -ENOMEM;
    3046           4 :         char            *buffer = NULL;
    3047             :         uint32_t        nsid;
    3048           4 :         size_t          buf_size = (SPDK_NVME_MAX_CHANGED_NAMESPACES * sizeof(uint32_t));
    3049             : 
    3050           4 :         if (ctrlr->opts.disable_read_changed_ns_list_log_page) {
    3051           0 :                 return 0;
    3052             :         }
    3053             : 
    3054           4 :         buffer = spdk_dma_zmalloc(buf_size, 4096, NULL);
    3055           4 :         if (!buffer) {
    3056           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate buffer for getting "
    3057             :                                   "changed ns log.\n");
    3058           0 :                 return rc;
    3059             :         }
    3060             : 
    3061           4 :         status = calloc(1, sizeof(*status));
    3062           4 :         if (!status) {
    3063           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    3064           0 :                 goto free_buffer;
    3065             :         }
    3066             : 
    3067           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
    3068             :                                               SPDK_NVME_LOG_CHANGED_NS_LIST,
    3069             :                                               SPDK_NVME_GLOBAL_NS_TAG,
    3070             :                                               buffer, buf_size, 0,
    3071             :                                               nvme_completion_poll_cb, status);
    3072             : 
    3073           4 :         if (rc) {
    3074           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_get_log_page() failed: rc=%d\n", rc);
    3075           0 :                 free(status);
    3076           0 :                 goto free_buffer;
    3077             :         }
    3078             : 
    3079           4 :         rc = nvme_wait_for_completion_timeout(ctrlr->adminq, status,
    3080           4 :                                               ctrlr->opts.admin_timeout_ms * 1000);
    3081           4 :         if (!status->timed_out) {
    3082           4 :                 free(status);
    3083             :         }
    3084             : 
    3085           4 :         if (rc) {
    3086           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "wait for spdk_nvme_ctrlr_cmd_get_log_page failed: rc=%d\n", rc);
    3087           0 :                 goto free_buffer;
    3088             :         }
    3089             : 
    3090             :         /* Only check for the overflow case. */
    3091           4 :         nsid = from_le32(buffer);
    3092           4 :         if (nsid == 0xffffffffu) {
    3093           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "changed ns log overflowed.\n");
    3094             :         }
    3095             : 
    3096           4 : free_buffer:
    3097           4 :         spdk_dma_free(buffer);
    3098           4 :         return rc;
    3099             : }
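                     : 
                     : /*
                     :  * A sketch of the log page parsed above, per the NVMe spec: the Changed
                     :  * Namespace List is an array of SPDK_NVME_MAX_CHANGED_NAMESPACES (1024)
                     :  * little-endian 32-bit NSIDs. A first entry of 0xFFFFFFFF means more
                     :  * namespaces changed than the list can hold, which is the only condition
                     :  * checked here; reading the page also clears it, so the Namespace Attribute
                     :  * Changed notice can fire again.
                     :  */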
    3100             : 
    3101             : static void
    3102           5 : nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3103             :                                const struct spdk_nvme_cpl *cpl)
    3104             : {
    3105             :         union spdk_nvme_async_event_completion event;
    3106             :         struct spdk_nvme_ctrlr_process *active_proc;
    3107             :         int rc;
    3108             : 
    3109           5 :         event.raw = cpl->cdw0;
    3110             : 
    3111           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3112           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
    3113           4 :                 nvme_ctrlr_clear_changed_ns_log(ctrlr);
    3114             : 
    3115           4 :                 rc = nvme_ctrlr_identify_active_ns(ctrlr);
    3116           4 :                 if (rc) {
    3117           0 :                         return;
    3118             :                 }
    3119           4 :                 nvme_ctrlr_update_namespaces(ctrlr);
    3120           4 :                 nvme_io_msg_ctrlr_update(ctrlr);
    3121             :         }
    3122             : 
    3123           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3124           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
    3125           1 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
    3126           1 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
    3127           1 :                         if (rc) {
    3128           0 :                                 return;
    3129             :                         }
    3130           1 :                         nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
    3131             :                                                       ctrlr);
    3132             :                 }
    3133             :         }
    3134             : 
    3135           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3136           5 :         if (active_proc && active_proc->aer_cb_fn) {
    3137           3 :                 active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
    3138             :         }
    3139             : }
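                     : 
                     : /*
                     :  * Usage sketch for the aer_cb_fn dispatched above. The registration API is
                     :  * public SPDK (spdk_nvme_ctrlr_register_aer_callback); the callback below is
                     :  * a hypothetical application handler:
                     :  *
                     :  *   static void
                     :  *   my_aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
                     :  *   {
                     :  *           union spdk_nvme_async_event_completion event;
                     :  *
                     :  *           event.raw = cpl->cdw0;
                     :  *           printf("AER: type %u, info 0x%x\n",
                     :  *                  event.bits.async_event_type,
                     :  *                  event.bits.async_event_info);
                     :  *   }
                     :  *
                     :  *   spdk_nvme_ctrlr_register_aer_callback(ctrlr, my_aer_cb, NULL);
                     :  */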
    3140             : 
    3141             : static void
    3142           5 : nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3143             :                              const struct spdk_nvme_cpl *cpl)
    3144             : {
    3145             :         struct  spdk_nvme_ctrlr_aer_completion_list *nvme_event;
    3146             :         struct spdk_nvme_ctrlr_process *proc;
    3147             : 
    3148             :         /* Add the async event to each process object's event list */
    3149          10 :         TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
    3150             :                 /* Must be shared memory so other processes can access it */
    3151           5 :                 nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
    3152           5 :                 if (!nvme_event) {
    3153           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
    3154           0 :                         return;
    3155             :                 }
    3156           5 :                 nvme_event->cpl = *cpl;
    3157             : 
    3158           5 :                 STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
    3159             :         }
    3160             : }
    3161             : 
    3162             : static void
    3163           5 : nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
    3164             : {
    3165             :         struct  spdk_nvme_ctrlr_aer_completion_list  *nvme_event, *nvme_event_tmp;
    3166             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3167             : 
    3168           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3169             : 
    3170          10 :         STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
    3171           5 :                 STAILQ_REMOVE(&active_proc->async_events, nvme_event,
    3172             :                               spdk_nvme_ctrlr_aer_completion_list, link);
    3173           5 :                 nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
    3174           5 :                 spdk_free(nvme_event);
    3175             : 
    3176             :         }
    3177           5 : }
    3178             : 
    3179             : static void
    3180           5 : nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    3181             : {
    3182           5 :         struct nvme_async_event_request *aer = arg;
    3183           5 :         struct spdk_nvme_ctrlr          *ctrlr = aer->ctrlr;
    3184             : 
    3185           5 :         if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    3186           5 :             cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
    3187             :                 /*
    3188             :                  *  This is simulated when the controller is being shut down, to
    3189             :                  *  effectively abort outstanding asynchronous event requests
    3190             :                  *  and make sure all memory is freed.  Do not repost the
    3191             :                  *  request in this case.
    3192             :                  */
    3193           0 :                 return;
    3194             :         }
    3195             : 
    3196           5 :         if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
    3197           0 :             cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
    3198             :                 /*
    3199             :                  *  SPDK will only send as many AERs as the device says it supports,
    3200             :                  *  so this status code indicates an out-of-spec device.  Do not repost
    3201             :                  *  the request in this case.
    3202             :                  */
    3203           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Controller appears out-of-spec for asynchronous event request "
    3204             :                                   "handling. Do not repost this AER.\n");
    3205           0 :                 return;
    3206             :         }
    3207             : 
    3208             :         /* Add the events to the list */
    3209           5 :         nvme_ctrlr_queue_async_event(ctrlr, cpl);
    3210             : 
    3211             :         /* If the ctrlr was removed or in the destruct state, we should not send aer again */
    3212           5 :         if (ctrlr->is_removed || ctrlr->is_destructed) {
    3213           0 :                 return;
    3214             :         }
    3215             : 
    3216             :         /*
    3217             :          * Repost another asynchronous event request to replace the one
    3218             :          *  that just completed.
    3219             :          */
    3220           5 :         if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
    3221             :                 /*
    3222             :                  * We can't do anything to recover from a failure here,
    3223             :                  * so just print a warning message and leave the AER unsubmitted.
    3224             :                  */
    3225           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "resubmitting AER failed!\n");
    3226             :         }
    3227             : }
    3228             : 
    3229             : static int
    3230          24 : nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
    3231             :                                     struct nvme_async_event_request *aer)
    3232             : {
    3233             :         struct nvme_request *req;
    3234             : 
    3235          24 :         aer->ctrlr = ctrlr;
    3236          24 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
    3237          24 :         aer->req = req;
    3238          24 :         if (req == NULL) {
    3239           0 :                 return -1;
    3240             :         }
    3241             : 
    3242          24 :         req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
    3243          24 :         return nvme_ctrlr_submit_admin_request(ctrlr, req);
    3244             : }
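                     : 
                     : /*
                     :  * Note on the request built above: per the NVMe spec, Asynchronous Event
                     :  * Request carries no data payload and stays outstanding until the controller
                     :  * posts an event, which is why a null (data-less) request is allocated here
                     :  * and simply resubmitted from the completion callback.
                     :  */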
    3245             : 
    3246             : static void
    3247          19 : nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3248             : {
    3249             :         struct nvme_async_event_request         *aer;
    3250             :         int                                     rc;
    3251             :         uint32_t                                i;
    3252          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3253             : 
    3254          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3255           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "nvme_ctrlr_configure_aer failed!\n");
    3256           0 :                 ctrlr->num_aers = 0;
    3257             :         } else {
    3258             :                 /* aerl is a zero-based value, so we need to add 1 here. */
    3259          19 :                 ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
    3260             :         }
    3261             : 
    3262          38 :         for (i = 0; i < ctrlr->num_aers; i++) {
    3263          19 :                 aer = &ctrlr->aer[i];
    3264          19 :                 rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
    3265          19 :                 if (rc) {
    3266           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_construct_and_submit_aer failed!\n");
    3267           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3268           0 :                         return;
    3269             :                 }
    3270             :         }
    3271          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, ctrlr->opts.admin_timeout_ms);
    3272             : }
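                     : 
                     : /*
                     :  * Worked example of the aerl math above: aerl is zero-based, so a
                     :  * hypothetical cdata.aerl == 3 advertises 4 concurrently outstanding AERs;
                     :  * spdk_min() then caps num_aers at NVME_MAX_ASYNC_EVENTS so neither side's
                     :  * limit is exceeded.
                     :  */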
    3273             : 
    3274             : static int
    3275          19 : nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
    3276             : {
    3277             :         union spdk_nvme_feat_async_event_configuration  config;
    3278             :         int                                             rc;
    3279             : 
    3280          19 :         config.raw = 0;
    3281             : 
    3282          19 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3283           0 :                 config.bits.discovery_log_change_notice = 1;
    3284             :         } else {
    3285          19 :                 config.bits.crit_warn.bits.available_spare = 1;
    3286          19 :                 config.bits.crit_warn.bits.temperature = 1;
    3287          19 :                 config.bits.crit_warn.bits.device_reliability = 1;
    3288          19 :                 config.bits.crit_warn.bits.read_only = 1;
    3289          19 :                 config.bits.crit_warn.bits.volatile_memory_backup = 1;
    3290             : 
    3291          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
    3292           4 :                         if (ctrlr->cdata.oaes.ns_attribute_notices) {
    3293           0 :                                 config.bits.ns_attr_notice = 1;
    3294             :                         }
    3295           4 :                         if (ctrlr->cdata.oaes.fw_activation_notices) {
    3296           0 :                                 config.bits.fw_activation_notice = 1;
    3297             :                         }
    3298           4 :                         if (ctrlr->cdata.oaes.ana_change_notices) {
    3299           0 :                                 config.bits.ana_change_notice = 1;
    3300             :                         }
    3301             :                 }
    3302          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
    3303           0 :                         config.bits.telemetry_log_notice = 1;
    3304             :                 }
    3305             :         }
    3306             : 
    3307          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
    3308          19 :                              ctrlr->opts.admin_timeout_ms);
    3309             : 
    3310          19 :         rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
    3311             :                         nvme_ctrlr_configure_aer_done,
    3312             :                         ctrlr);
    3313          19 :         if (rc != 0) {
    3314           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3315           0 :                 return rc;
    3316             :         }
    3317             : 
    3318          19 :         return 0;
    3319             : }
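                     : 
                     : /*
                     :  * For reference on the version gates above: SPDK_NVME_VERSION() packs a
                     :  * version as (major << 16) | (minor << 8) | tertiary, mirroring the VS
                     :  * register layout, so e.g. SPDK_NVME_VERSION(1, 2, 0) == 0x00010200 and the
                     :  * checks against ctrlr->vs.raw are plain integer comparisons.
                     :  */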
    3320             : 
    3321             : struct spdk_nvme_ctrlr_process *
    3322          61 : nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
    3323             : {
    3324             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3325             : 
    3326          61 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3327          12 :                 if (active_proc->pid == pid) {
    3328          12 :                         return active_proc;
    3329             :                 }
    3330             :         }
    3331             : 
    3332          49 :         return NULL;
    3333             : }
    3334             : 
    3335             : struct spdk_nvme_ctrlr_process *
    3336          57 : nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
    3337             : {
    3338          57 :         return nvme_ctrlr_get_process(ctrlr, getpid());
    3339             : }
    3340             : 
    3341             : /**
    3342             :  * This function will be called when a process is using the controller.
    3343             :  *  1. For the primary process, it is called when constructing the controller.
     3344             :  *  2. For a secondary process, it is called when probing the controller.
     3345             :  * Note: checks whether the process has already been added; if so, it is a no-op.
    3346             :  */
    3347             : int
    3348           4 : nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
    3349             : {
    3350             :         struct spdk_nvme_ctrlr_process  *ctrlr_proc;
    3351           4 :         pid_t                           pid = getpid();
    3352             : 
    3353             :         /* Check whether the process is already added or not */
    3354           4 :         if (nvme_ctrlr_get_process(ctrlr, pid)) {
    3355           0 :                 return 0;
    3356             :         }
    3357             : 
    3358             :         /* Initialize the per process properties for this ctrlr */
    3359           4 :         ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
    3360             :                                   64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
    3361           4 :         if (ctrlr_proc == NULL) {
    3362           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to allocate memory to track the process props\n");
    3363             : 
    3364           0 :                 return -1;
    3365             :         }
    3366             : 
    3367           4 :         ctrlr_proc->is_primary = spdk_process_is_primary();
    3368           4 :         ctrlr_proc->pid = pid;
    3369           4 :         STAILQ_INIT(&ctrlr_proc->active_reqs);
    3370           4 :         ctrlr_proc->devhandle = devhandle;
    3371           4 :         ctrlr_proc->ref = 0;
    3372           4 :         TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
    3373           4 :         STAILQ_INIT(&ctrlr_proc->async_events);
    3374             : 
    3375           4 :         TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
    3376             : 
    3377           4 :         return 0;
    3378             : }
    3379             : 
    3380             : /**
    3381             :  * This function will be called when the process detaches the controller.
    3382             :  * Note: the ctrlr_lock must be held when calling this function.
    3383             :  */
    3384             : static void
    3385           1 : nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
    3386             :                           struct spdk_nvme_ctrlr_process *proc)
    3387             : {
    3388             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3389             : 
    3390           1 :         assert(STAILQ_EMPTY(&proc->active_reqs));
    3391             : 
    3392           1 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3393           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3394             :         }
    3395             : 
    3396           1 :         TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
    3397             : 
    3398           1 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    3399           1 :                 spdk_pci_device_detach(proc->devhandle);
    3400             :         }
    3401             : 
    3402           1 :         spdk_free(proc);
    3403           1 : }
    3404             : 
    3405             : /**
     3406             :  * This function will be called when a process has exited unexpectedly,
     3407             :  *  in order to free any incomplete nvme requests, allocated IO qpairs,
     3408             :  *  and other allocated memory.
    3409             :  * Note: the ctrlr_lock must be held when calling this function.
    3410             :  */
    3411             : static void
    3412           0 : nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
    3413             : {
    3414             :         struct nvme_request     *req, *tmp_req;
    3415             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3416             :         struct spdk_nvme_ctrlr_aer_completion_list *event;
    3417             : 
    3418           0 :         STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
    3419           0 :                 STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
    3420             : 
    3421           0 :                 assert(req->pid == proc->pid);
    3422           0 :                 nvme_cleanup_user_req(req);
    3423           0 :                 nvme_free_request(req);
    3424             :         }
    3425             : 
    3426             :         /* Free any async events still queued on this process object's event list */
    3427           0 :         while (!STAILQ_EMPTY(&proc->async_events)) {
    3428           0 :                 event = STAILQ_FIRST(&proc->async_events);
    3429           0 :                 STAILQ_REMOVE_HEAD(&proc->async_events, link);
    3430           0 :                 spdk_free(event);
    3431             :         }
    3432             : 
    3433           0 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3434           0 :                 TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
    3435             : 
    3436             :                 /*
    3437             :                  * The process may have been killed while some qpairs were in their
    3438             :                  *  completion context.  Clear that flag here to allow these IO
    3439             :                  *  qpairs to be deleted.
    3440             :                  */
    3441           0 :                 qpair->in_completion_context = 0;
    3442             : 
    3443           0 :                 qpair->no_deletion_notification_needed = 1;
    3444             : 
    3445           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3446             :         }
    3447             : 
    3448           0 :         spdk_free(proc);
    3449           0 : }
    3450             : 
    3451             : /**
    3452             :  * This function will be called when destructing the controller.
     3453             :  *  1. There are no more admin requests on this controller.
     3454             :  *  2. Clean up any leftover resource allocations whose associated process is gone.
    3455             :  */
    3456             : void
    3457          49 : nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
    3458             : {
    3459             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3460             : 
    3461             :         /* Free all the processes' properties and make sure there are no pending admin I/Os */
    3462          52 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3463           3 :                 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3464             : 
    3465           3 :                 assert(STAILQ_EMPTY(&active_proc->active_reqs));
    3466             : 
    3467           3 :                 spdk_free(active_proc);
    3468             :         }
    3469          49 : }
    3470             : 
    3471             : /**
    3472             :  * This function will be called when any other process attaches or
     3473             :  *  detaches the controller, in order to clean up any unexpectedly
     3474             :  *  terminated processes.
    3475             :  * Note: the ctrlr_lock must be held when calling this function.
    3476             :  */
    3477             : static int
    3478           0 : nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
    3479             : {
    3480             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3481           0 :         int                             active_proc_count = 0;
    3482             : 
    3483           0 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3484           0 :                 if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
    3485           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "process %d terminated unexpectedly\n", active_proc->pid);
    3486             : 
    3487           0 :                         TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3488             : 
    3489           0 :                         nvme_ctrlr_cleanup_process(active_proc);
    3490             :                 } else {
    3491           0 :                         active_proc_count++;
    3492             :                 }
    3493             :         }
    3494             : 
    3495           0 :         return active_proc_count;
    3496             : }
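                     : 
                     : /*
                     :  * The liveness probe above relies on standard POSIX semantics: kill(pid, 0)
                     :  * delivers no signal but still performs the existence check, failing with
                     :  * ESRCH when no such process exists. A minimal standalone sketch of the
                     :  * idiom (hypothetical helper, not part of this file):
                     :  *
                     :  *   #include <signal.h>
                     :  *   #include <errno.h>
                     :  *   #include <stdbool.h>
                     :  *
                     :  *   static bool
                     :  *   process_is_alive(pid_t pid)
                     :  *   {
                     :  *           return !(kill(pid, 0) == -1 && errno == ESRCH);
                     :  *   }
                     :  */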
    3497             : 
    3498             : void
    3499           0 : nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
    3500             : {
    3501             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3502             : 
    3503           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    3504             : 
    3505           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3506             : 
    3507           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3508           0 :         if (active_proc) {
    3509           0 :                 active_proc->ref++;
    3510             :         }
    3511             : 
    3512           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    3513           0 : }
    3514             : 
    3515             : void
    3516           0 : nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
    3517             : {
    3518             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3519             :         int                             proc_count;
    3520             : 
    3521           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    3522             : 
    3523           0 :         proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
    3524             : 
    3525           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3526           0 :         if (active_proc) {
    3527           0 :                 active_proc->ref--;
    3528           0 :                 assert(active_proc->ref >= 0);
    3529             : 
    3530             :                 /*
    3531             :                  * The last active process will be removed at the end of
    3532             :                  * the destruction of the controller.
    3533             :                  */
    3534           0 :                 if (active_proc->ref == 0 && proc_count != 1) {
    3535           0 :                         nvme_ctrlr_remove_process(ctrlr, active_proc);
    3536             :                 }
    3537             :         }
    3538             : 
    3539           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    3540           0 : }
    3541             : 
    3542             : int
    3543           0 : nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
    3544             : {
    3545             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3546           0 :         int                             ref = 0;
    3547             : 
    3548           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    3549             : 
    3550           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3551             : 
    3552           0 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3553           0 :                 ref += active_proc->ref;
    3554             :         }
    3555             : 
    3556           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    3557             : 
    3558           0 :         return ref;
    3559             : }
    3560             : 
    3561             : /**
    3562             :  *  Get the PCI device handle which is only visible to its associated process.
    3563             :  */
    3564             : struct spdk_pci_device *
    3565           0 : nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
    3566             : {
    3567             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3568           0 :         struct spdk_pci_device          *devhandle = NULL;
    3569             : 
    3570           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    3571             : 
    3572           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3573           0 :         if (active_proc) {
    3574           0 :                 devhandle = active_proc->devhandle;
    3575             :         }
    3576             : 
    3577           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    3578             : 
    3579           0 :         return devhandle;
    3580             : }
    3581             : 
    3582             : static void
    3583          21 : nvme_ctrlr_process_init_vs_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3584             : {
    3585          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3586             : 
    3587          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3588           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the VS register\n");
    3589           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3590           0 :                 return;
    3591             :         }
    3592             : 
    3593          21 :         assert(value <= UINT32_MAX);
    3594          21 :         ctrlr->vs.raw = (uint32_t)value;
    3595          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP, NVME_TIMEOUT_INFINITE);
    3596             : }
    3597             : 
    3598             : static void
    3599          21 : nvme_ctrlr_process_init_cap_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3600             : {
    3601          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3602             : 
    3603          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3604           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CAP register\n");
    3605           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3606           0 :                 return;
    3607             :         }
    3608             : 
    3609          21 :         ctrlr->cap.raw = value;
    3610          21 :         nvme_ctrlr_init_cap(ctrlr);
    3611          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    3612             : }
    3613             : 
    3614             : static void
    3615          22 : nvme_ctrlr_process_init_check_en(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3616             : {
    3617          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3618             :         enum nvme_ctrlr_state state;
    3619             : 
    3620          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3621           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3622           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3623           0 :                 return;
    3624             :         }
    3625             : 
    3626          22 :         assert(value <= UINT32_MAX);
    3627          22 :         ctrlr->process_init_cc.raw = (uint32_t)value;
    3628             : 
    3629          22 :         if (ctrlr->process_init_cc.bits.en) {
    3630           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1\n");
    3631           2 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
    3632             :         } else {
    3633          20 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
    3634             :         }
    3635             : 
    3636          22 :         nvme_ctrlr_set_state(ctrlr, state, nvme_ctrlr_get_ready_timeout(ctrlr));
    3637             : }
    3638             : 
    3639             : static void
    3640           2 : nvme_ctrlr_process_init_set_en_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3641             : {
    3642           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3643             : 
    3644           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3645           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write the CC register\n");
    3646           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3647           0 :                 return;
    3648             :         }
    3649             : 
    3650             :         /*
    3651             :          * Wait 2.5 seconds before accessing PCI registers.
    3652             :          * Not using sleep() to avoid blocking other controllers' initialization.
    3653             :          */
    3654           2 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
    3655           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Applying quirk: delay 2.5 seconds before reading registers\n");
    3656           0 :                 ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
    3657             :         }
    3658             : 
    3659           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3660             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3661             : }
    3662             : 
    3663             : static void
    3664           2 : nvme_ctrlr_process_init_set_en_0_read_cc(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3665             : {
    3666           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3667             :         union spdk_nvme_cc_register cc;
    3668             :         int rc;
    3669             : 
    3670           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3671           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3672           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3673           0 :                 return;
    3674             :         }
    3675             : 
    3676           2 :         assert(value <= UINT32_MAX);
    3677           2 :         cc.raw = (uint32_t)value;
    3678           2 :         cc.bits.en = 0;
    3679           2 :         ctrlr->process_init_cc.raw = cc.raw;
    3680             : 
    3681           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
    3682             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3683             : 
    3684           2 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_process_init_set_en_0, ctrlr);
    3685           2 :         if (rc != 0) {
    3686           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    3687           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3688             :         }
    3689             : }
    3690             : 
    3691             : static void
    3692           2 : nvme_ctrlr_process_init_wait_for_ready_1(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3693             : {
    3694           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3695             :         union spdk_nvme_csts_register csts;
    3696             : 
    3697           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3698             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3699             :                  * temporarily. Allow for this case.
    3700             :                  */
    3701           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3702           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3703           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3704             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3705             :                 } else {
    3706           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3707           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3708             :                 }
    3709             : 
    3710           0 :                 return;
    3711             :         }
    3712             : 
    3713           2 :         assert(value <= UINT32_MAX);
    3714           2 :         csts.raw = (uint32_t)value;
    3715           2 :         if (csts.bits.rdy == 1 || csts.bits.cfs == 1) {
    3716           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0,
    3717             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3718             :         } else {
    3719           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
    3720           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3721             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3722             :         }
    3723             : }
    3724             : 
    3725             : static void
    3726          22 : nvme_ctrlr_process_init_wait_for_ready_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3727             : {
    3728          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3729             :         union spdk_nvme_csts_register csts;
    3730             : 
    3731          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3732             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3733             :                  * temporarily. Allow for this case.
    3734             :                  */
    3735           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3736           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3737           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3738             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3739             :                 } else {
    3740           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3741           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3742             :                 }
    3743             : 
    3744           0 :                 return;
    3745             :         }
    3746             : 
    3747          22 :         assert(value <= UINT32_MAX);
    3748          22 :         csts.raw = (uint32_t)value;
    3749          22 :         if (csts.bits.rdy == 0) {
    3750          22 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 0 && CSTS.RDY = 0\n");
    3751          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLED,
    3752             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3753             :         } else {
    3754           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3755             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3756             :         }
    3757             : }
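                     : 
                     : /*
                     :  * Handshake sketch for the CSTS polls in these handlers, following the NVMe
                     :  * spec's enable/disable procedure:
                     :  *
                     :  *   disable:  write CC.EN = 0, then poll until CSTS.RDY == 0
                     :  *   enable:   write CC.EN = 1, then poll until CSTS.RDY == 1
                     :  *
                     :  * The host waits for CSTS.RDY to reflect the previous CC.EN transition
                     :  * before toggling CC.EN again.
                     :  */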
    3758             : 
    3759             : static void
    3760           9 : nvme_ctrlr_process_init_enable_wait_for_ready_1(void *ctx, uint64_t value,
    3761             :                 const struct spdk_nvme_cpl *cpl)
    3762             : {
    3763           9 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3764             :         union spdk_nvme_csts_register csts;
    3765             : 
    3766           9 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3767             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3768             :                  * temporarily. Allow for this case.
    3769             :                  */
    3770           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3771           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3772           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3773             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3774             :                 } else {
    3775           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3776           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3777             :                 }
    3778             : 
    3779           0 :                 return;
    3780             :         }
    3781             : 
    3782           9 :         assert(value <= UINT32_MAX);
    3783           9 :         csts.raw = (uint32_t)value;
    3784           9 :         if (csts.bits.rdy == 1) {
    3785           9 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
    3786             :                 /*
    3787             :                  * The controller has been enabled.
    3788             :                  *  Perform the rest of initialization serially.
    3789             :                  */
    3790           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
    3791           9 :                                      ctrlr->opts.admin_timeout_ms);
    3792             :         } else {
    3793           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3794             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3795             :         }
    3796             : }
    3797             : 
    3798             : /**
    3799             :  * This function will be called repeatedly during initialization until the controller is ready.
    3800             :  */
    3801             : int
    3802         430 : nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
    3803             : {
    3804             :         uint32_t ready_timeout_in_ms;
    3805             :         uint64_t ticks;
    3806         430 :         int rc = 0;
    3807             : 
    3808         430 :         ticks = spdk_get_ticks();
    3809             : 
    3810             :         /*
    3811             :          * May need to avoid accessing any register on the target controller
    3812             :          * for a while. Return early without touching the FSM.
    3813             :          * The sleep_timeout_tsc > 0 check exists for the unit tests.
    3814             :          */
    3815         430 :         if ((ctrlr->sleep_timeout_tsc > 0) &&
    3816           2 :             (ticks <= ctrlr->sleep_timeout_tsc)) {
    3817           1 :                 return 0;
    3818             :         }
    3819         429 :         ctrlr->sleep_timeout_tsc = 0;
    3820             : 
    3821         429 :         ready_timeout_in_ms = nvme_ctrlr_get_ready_timeout(ctrlr);
    3822             : 
    3823             :         /*
    3824             :          * Check if the current initialization step is done or has timed out.
    3825             :          */
    3826         429 :         switch (ctrlr->state) {
    3827           1 :         case NVME_CTRLR_STATE_INIT_DELAY:
    3828           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
    3829           1 :                 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
    3830             :                         /*
    3831             :                          * Controller may need some delay before it's enabled.
    3832             :                          *
    3833             :                          * This is a workaround for an issue where the PCIe-attached NVMe controller
    3834             :                          * is not ready after VFIO reset. We delay the initialization rather than the
    3835             :                          * enabling itself, because this is required only for the very first enabling
    3836             :                          * - directly after a VFIO reset.
    3837             :                          */
    3838           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Adding 2 second delay before initializing the controller\n");
    3839           1 :                         ctrlr->sleep_timeout_tsc = ticks + (2000 * spdk_get_ticks_hz() / 1000);
    3840             :                 }
    3841           1 :                 break;
    3842             : 
    3843           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    3844           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    3845           0 :                 break;
    3846             : 
    3847          21 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ: /* synonymous with NVME_CTRLR_STATE_INIT and NVME_CTRLR_STATE_DISCONNECTED */
    3848          21 :                 rc = nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq);
    3849          21 :                 if (rc == 0) {
    3850          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
    3851             :                                              NVME_TIMEOUT_INFINITE);
    3852             :                 } else {
    3853           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3854             :                 }
    3855          21 :                 break;
    3856             : 
    3857          21 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    3858          21 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    3859             : 
    3860          21 :                 switch (nvme_qpair_get_state(ctrlr->adminq)) {
    3861           0 :                 case NVME_QPAIR_CONNECTING:
    3862           0 :                         break;
    3863          21 :                 case NVME_QPAIR_CONNECTED:
    3864          21 :                         nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
    3865             :                 /* Fall through */
    3866          21 :                 case NVME_QPAIR_ENABLED:
    3867          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS,
    3868             :                                              NVME_TIMEOUT_INFINITE);
    3869             :                         /* Abort any queued requests that were sent while the adminq was connecting
    3870             :                          * to avoid stalling the init process during a reset, as requests don't get
    3871             :                          * resubmitted while the controller is resetting and subsequent commands
    3872             :                          * would get queued too.
    3873             :                          */
    3874          21 :                         nvme_qpair_abort_queued_reqs(ctrlr->adminq);
    3875          21 :                         break;
    3876           0 :                 case NVME_QPAIR_DISCONNECTING:
    3877           0 :                         assert(ctrlr->adminq->async == true);
    3878           0 :                         break;
    3879           0 :                 case NVME_QPAIR_DISCONNECTED:
    3880             :                 /* fallthrough */
    3881             :                 default:
    3882           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3883           0 :                         break;
    3884             :                 }
    3885             : 
    3886          21 :                 break;
    3887             : 
    3888          21 :         case NVME_CTRLR_STATE_READ_VS:
    3889          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS, NVME_TIMEOUT_INFINITE);
    3890          21 :                 rc = nvme_ctrlr_get_vs_async(ctrlr, nvme_ctrlr_process_init_vs_done, ctrlr);
    3891          21 :                 break;
    3892             : 
    3893          21 :         case NVME_CTRLR_STATE_READ_CAP:
    3894          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP, NVME_TIMEOUT_INFINITE);
    3895          21 :                 rc = nvme_ctrlr_get_cap_async(ctrlr, nvme_ctrlr_process_init_cap_done, ctrlr);
    3896          21 :                 break;
    3897             : 
    3898          22 :         case NVME_CTRLR_STATE_CHECK_EN:
    3899             :                 /* Begin the hardware initialization by making sure the controller is disabled. */
    3900          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC, ready_timeout_in_ms);
    3901          22 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_check_en, ctrlr);
    3902          22 :                 break;
    3903             : 
    3904           2 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    3905             :                 /*
    3906             :                  * Controller is currently enabled. We need to disable it to cause a reset.
    3907             :                  *
    3908             :                  * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
    3909             :                  *  Wait for the ready bit to be 1 before disabling the controller.
    3910             :                  */
    3911           2 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    3912             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3913           2 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_1, ctrlr);
    3914           2 :                 break;
    3915             : 
    3916           2 :         case NVME_CTRLR_STATE_SET_EN_0:
    3917           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 0\n");
    3918           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC, ready_timeout_in_ms);
    3919           2 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_set_en_0_read_cc, ctrlr);
    3920           2 :                 break;
    3921             : 
    3922          22 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    3923          22 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
    3924             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3925          22 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_0, ctrlr);
    3926          22 :                 break;
    3927             : 
    3928          21 :         case NVME_CTRLR_STATE_DISABLED:
    3929          21 :                 if (ctrlr->is_disconnecting) {
    3930           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr was disabled.\n");
    3931             :                 } else {
    3932          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
    3933             : 
    3934             :                         /*
     3935             :          * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss the CC.EN
     3936             :          *  write if it occurs too soon after CSTS.RDY is reported as 0.
    3937             :                          */
    3938          21 :                         spdk_delay_us(100);
    3939             :                 }
    3940          21 :                 break;
    3941             : 
    3942          21 :         case NVME_CTRLR_STATE_ENABLE:
    3943          21 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 1\n");
    3944          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC, ready_timeout_in_ms);
    3945          21 :                 rc = nvme_ctrlr_enable(ctrlr);
    3946          21 :                 if (rc) {
     3947           7 :                         NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr enable failed with error: %d\n", rc);
    3948             :                 }
    3949          21 :                 return rc;
    3950             : 
    3951           9 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    3952           9 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    3953             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3954           9 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_enable_wait_for_ready_1,
    3955             :                                                ctrlr);
    3956           9 :                 break;
    3957             : 
    3958           9 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    3959           9 :                 nvme_transport_qpair_reset(ctrlr->adminq);
    3960           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
    3961           9 :                 break;
    3962             : 
    3963          16 :         case NVME_CTRLR_STATE_IDENTIFY:
    3964          16 :                 rc = nvme_ctrlr_identify(ctrlr);
    3965          16 :                 break;
    3966             : 
    3967          19 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    3968          19 :                 rc = nvme_ctrlr_configure_aer(ctrlr);
    3969          19 :                 break;
    3970             : 
    3971          22 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    3972          22 :                 rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
    3973          22 :                 break;
    3974             : 
    3975          19 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    3976          19 :                 rc = nvme_ctrlr_identify_iocs_specific(ctrlr);
    3977          19 :                 break;
    3978             : 
    3979           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    3980           0 :                 rc = nvme_ctrlr_get_zns_cmd_and_effects_log(ctrlr);
    3981           0 :                 break;
    3982             : 
    3983          19 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    3984          19 :                 nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
    3985          19 :                 rc = nvme_ctrlr_set_num_queues(ctrlr);
    3986          19 :                 break;
    3987             : 
    3988          24 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    3989          24 :                 _nvme_ctrlr_identify_active_ns(ctrlr);
    3990          24 :                 break;
    3991             : 
    3992          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    3993          14 :                 rc = nvme_ctrlr_identify_namespaces(ctrlr);
    3994          14 :                 break;
    3995             : 
    3996          14 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    3997          14 :                 rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
    3998          14 :                 break;
    3999             : 
    4000          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    4001          14 :                 rc = nvme_ctrlr_identify_namespaces_iocs_specific(ctrlr);
    4002          14 :                 break;
    4003             : 
    4004          15 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    4005          15 :                 rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
    4006          15 :                 break;
    4007             : 
    4008           1 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    4009           1 :                 rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
    4010           1 :                 break;
    4011             : 
    4012          14 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    4013          14 :                 nvme_ctrlr_set_supported_features(ctrlr);
    4014          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    4015          14 :                                      ctrlr->opts.admin_timeout_ms);
    4016          14 :                 break;
    4017             : 
    4018          14 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    4019          14 :                 rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
    4020          14 :                 break;
    4021             : 
    4022          14 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    4023          14 :                 rc = nvme_ctrlr_set_host_id(ctrlr);
    4024          14 :                 break;
    4025             : 
    4026          17 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    4027          17 :                 rc = nvme_transport_ctrlr_ready(ctrlr);
    4028          17 :                 if (rc) {
    4029           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Transport controller ready step failed: rc %d\n", rc);
    4030           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4031             :                 } else {
    4032          16 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    4033             :                 }
    4034          17 :                 break;
    4035             : 
    4036           0 :         case NVME_CTRLR_STATE_READY:
    4037           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr already in ready state\n");
    4038           0 :                 return 0;
    4039             : 
    4040           0 :         case NVME_CTRLR_STATE_ERROR:
    4041           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr is in error state\n");
    4042           0 :                 return -1;
    4043             : 
    4044           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    4045             :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    4046             :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    4047             :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    4048             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4049             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    4050             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    4051             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4052             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    4053             :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    4054             :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    4055             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    4056             :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    4057             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    4058             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    4059             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    4060             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    4061             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    4062             :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    4063             :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    4064             :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    4065             :                 /*
    4066             :                  * nvme_ctrlr_process_init() may be called from the completion context
    4067             :                  * for the admin qpair. Avoid recursive calls for this case.
    4068             :                  */
    4069           0 :                 if (!ctrlr->adminq->in_completion_context) {
    4070           0 :                         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4071             :                 }
    4072           0 :                 break;
    4073             : 
    4074           0 :         default:
    4075           0 :                 assert(0);
    4076             :                 return -1;
    4077             :         }
    4078             : 
    4079         408 :         if (rc) {
    4080           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr operation failed with error: %d, ctrlr state: %d (%s)\n",
    4081             :                                   rc, ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4082             :         }
    4083             : 
    4084             :         /* Note: we use the ticks captured when we entered this function.
    4085             :          * This covers environments where the SPDK process gets swapped out after
    4086             :          * we tried to advance the state but before we check the timeout here.
    4087             :          * It is not normal for this to happen, but harmless to handle it in this
    4088             :          * way.
    4089             :          */
    4090         408 :         if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
    4091           0 :             ticks > ctrlr->state_timeout_tsc) {
    4092           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Initialization timed out in state %d (%s)\n",
    4093             :                                   ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4094           0 :                 return -1;
    4095             :         }
    4096             : 
    4097         408 :         return rc;
    4098             : }
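
A minimal sketch of driving this state machine to completion (hypothetical;
real callers such as the probe path poll asynchronously, but the loop below
mirrors the synchronous polling pattern used by nvme_ctrlr_destruct() later
in this file):

    static int
    example_drive_init(struct spdk_nvme_ctrlr *ctrlr)
    {
            int rc;

            while (ctrlr->state != NVME_CTRLR_STATE_READY) {
                    rc = nvme_ctrlr_process_init(ctrlr);
                    if (rc) {
                            /* A step failed or initialization timed out. */
                            return rc;
                    }
                    nvme_delay(1000);
            }

            return 0;
    }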
    4099             : 
    4100             : int
    4101          46 : nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
    4102             : {
    4103          46 :         pthread_mutexattr_t attr;
    4104          46 :         int rc = 0;
    4105             : 
    4106          46 :         if (pthread_mutexattr_init(&attr)) {
    4107           0 :                 return -1;
    4108             :         }
    4109          92 :         if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
    4110             : #ifndef __FreeBSD__
    4111          92 :             pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
    4112          92 :             pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
    4113             : #endif
    4114          46 :             pthread_mutex_init(mtx, &attr)) {
    4115           0 :                 rc = -1;
    4116             :         }
    4117          46 :         pthread_mutexattr_destroy(&attr);
    4118          46 :         return rc;
    4119             : }
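
A minimal usage sketch (illustrative only): the recursive type lets the same
thread re-acquire ctrlr_lock when public API calls nest, while the robust and
process-shared attributes (skipped on FreeBSD) keep the mutex usable across
processes that share the controller:

    pthread_mutex_t mtx;

    if (nvme_robust_mutex_init_recursive_shared(&mtx) == 0) {
            pthread_mutex_lock(&mtx);
            pthread_mutex_lock(&mtx);   /* recursive: same owner may relock */
            pthread_mutex_unlock(&mtx);
            pthread_mutex_unlock(&mtx); /* one unlock per lock */
            pthread_mutex_destroy(&mtx);
    }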
    4120             : 
    4121             : int
    4122          46 : nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
    4123             : {
    4124             :         int rc;
    4125             : 
    4126          46 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    4127           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
    4128             :         } else {
    4129          45 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    4130             :         }
    4131             : 
    4132          46 :         if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
     4133           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "admin_queue_size %u exceeds max defined by NVMe spec, using max value\n",
    4134             :                                   ctrlr->opts.admin_queue_size);
    4135           0 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
    4136             :         }
    4137             : 
    4138          46 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE &&
    4139           0 :             (ctrlr->opts.admin_queue_size % SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE) != 0) {
    4140           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
     4141             :                                   "admin_queue_size %u is invalid for this NVMe device, adjusting to the next multiple\n",
    4142             :                                   ctrlr->opts.admin_queue_size);
    4143           0 :                 ctrlr->opts.admin_queue_size = SPDK_ALIGN_CEIL(ctrlr->opts.admin_queue_size,
    4144             :                                                SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE);
    4145             :         }
    4146             : 
    4147          46 :         if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
    4148          25 :                 NVME_CTRLR_ERRLOG(ctrlr,
     4149             :                                   "admin_queue_size %u is less than minimum defined by NVMe spec, using min value\n",
    4150             :                                   ctrlr->opts.admin_queue_size);
    4151          25 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
    4152             :         }
    4153             : 
    4154          46 :         ctrlr->flags = 0;
    4155          46 :         ctrlr->free_io_qids = NULL;
    4156          46 :         ctrlr->is_resetting = false;
    4157          46 :         ctrlr->is_failed = false;
    4158          46 :         ctrlr->is_destructed = false;
    4159             : 
    4160          46 :         TAILQ_INIT(&ctrlr->active_io_qpairs);
    4161          46 :         STAILQ_INIT(&ctrlr->queued_aborts);
    4162          46 :         ctrlr->outstanding_aborts = 0;
    4163             : 
    4164          46 :         ctrlr->ana_log_page = NULL;
    4165          46 :         ctrlr->ana_log_page_size = 0;
    4166             : 
    4167          46 :         rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
    4168          46 :         if (rc != 0) {
    4169           0 :                 return rc;
    4170             :         }
    4171             : 
    4172          46 :         TAILQ_INIT(&ctrlr->active_procs);
    4173          46 :         STAILQ_INIT(&ctrlr->register_operations);
    4174             : 
    4175          46 :         RB_INIT(&ctrlr->ns);
    4176             : 
    4177          46 :         return rc;
    4178             : }
    4179             : 
    4180             : static void
    4181          21 : nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr)
    4182             : {
    4183          21 :         if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
    4184           5 :                 ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
    4185             :         }
    4186             : 
    4187          21 :         ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
    4188             : 
    4189             :         /* For now, always select page_size == min_page_size. */
    4190          21 :         ctrlr->page_size = ctrlr->min_page_size;
    4191             : 
    4192          21 :         ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
    4193          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
    4194          21 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE &&
    4195           0 :             ctrlr->opts.io_queue_size == DEFAULT_IO_QUEUE_SIZE) {
    4196             :                 /* If the user specifically set an IO queue size different than the
    4197             :                  * default, use that value.  Otherwise overwrite with the quirked value.
    4198             :                  * This allows this quirk to be overridden when necessary.
    4199             :                  * However, cap.mqes still needs to be respected.
    4200             :                  */
    4201           0 :                 ctrlr->opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK;
     4202             :                 /* If the user specifically set an IO queue size different from the
    4203          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
    4204             : 
    4205          21 :         ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
    4206          21 : }
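
A worked example of the page-size math above (register values assumed):
CAP.MPSMIN = 0 gives min_page_size = 1u << (12 + 0) = 4096 bytes, while
CAP.MPSMIN = 4 gives 1u << 16 = 65536 bytes; page_size is then pinned to that
minimum. Similarly, with CAP.MQES = 255 the io_queue_size is clamped to at
most 256 entries, since mqes is zero-based (mqes + 1u).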
    4207             : 
    4208             : void
    4209          46 : nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
    4210             : {
    4211          46 :         pthread_mutex_destroy(&ctrlr->ctrlr_lock);
    4212             : 
    4213          46 :         nvme_ctrlr_free_processes(ctrlr);
    4214          46 : }
    4215             : 
    4216             : void
    4217          46 : nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
    4218             :                           struct nvme_ctrlr_detach_ctx *ctx)
    4219             : {
    4220             :         struct spdk_nvme_qpair *qpair, *tmp;
    4221             : 
    4222          46 :         NVME_CTRLR_DEBUGLOG(ctrlr, "Prepare to destruct SSD\n");
    4223             : 
    4224          46 :         ctrlr->prepare_for_reset = false;
    4225          46 :         ctrlr->is_destructed = true;
    4226             : 
    4227          46 :         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4228             : 
    4229          46 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    4230          46 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    4231             : 
    4232          46 :         TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
    4233           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    4234             :         }
    4235             : 
    4236          46 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    4237          46 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    4238             : 
    4239          46 :         nvme_ctrlr_shutdown_async(ctrlr, ctx);
    4240          46 : }
    4241             : 
    4242             : int
    4243          84 : nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    4244             :                                struct nvme_ctrlr_detach_ctx *ctx)
    4245             : {
    4246             :         struct spdk_nvme_ns *ns, *tmp_ns;
    4247          84 :         int rc = 0;
    4248             : 
    4249          84 :         if (!ctx->shutdown_complete) {
    4250          76 :                 rc = nvme_ctrlr_shutdown_poll_async(ctrlr, ctx);
    4251          76 :                 if (rc == -EAGAIN) {
    4252          38 :                         return -EAGAIN;
    4253             :                 }
    4254             :                 /* Destruct ctrlr forcefully for any other error. */
    4255             :         }
    4256             : 
    4257          46 :         if (ctx->cb_fn) {
    4258           0 :                 ctx->cb_fn(ctrlr);
    4259             :         }
    4260             : 
    4261          46 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4262             : 
    4263        7732 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    4264        7686 :                 nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    4265        7686 :                 RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    4266        7686 :                 spdk_free(ns);
    4267             :         }
    4268             : 
    4269          46 :         ctrlr->active_ns_count = 0;
    4270             : 
    4271          46 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    4272             : 
    4273          46 :         free(ctrlr->ana_log_page);
    4274          46 :         free(ctrlr->copied_ana_desc);
    4275          46 :         ctrlr->ana_log_page = NULL;
    4276          46 :         ctrlr->copied_ana_desc = NULL;
    4277          46 :         ctrlr->ana_log_page_size = 0;
    4278             : 
    4279          46 :         nvme_transport_ctrlr_destruct(ctrlr);
    4280             : 
    4281          46 :         return rc;
    4282             : }
    4283             : 
    4284             : void
    4285          46 : nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
    4286             : {
    4287          46 :         struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
    4288             :         int rc;
    4289             : 
    4290          46 :         nvme_ctrlr_destruct_async(ctrlr, &ctx);
    4291             : 
    4292             :         while (1) {
    4293          84 :                 rc = nvme_ctrlr_destruct_poll_async(ctrlr, &ctx);
    4294          84 :                 if (rc != -EAGAIN) {
    4295          46 :                         break;
    4296             :                 }
    4297          38 :                 nvme_delay(1000);
    4298             :         }
    4299          46 : }
    4300             : 
    4301             : int
    4302          24 : nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
    4303             :                                 struct nvme_request *req)
    4304             : {
    4305          24 :         return nvme_qpair_submit_request(ctrlr->adminq, req);
    4306             : }
    4307             : 
    4308             : static void
    4309           0 : nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
    4310             : {
    4311             :         /* Do nothing */
    4312           0 : }
    4313             : 
    4314             : /*
    4315             :  * Check if we need to send a Keep Alive command.
    4316             :  * Caller must hold ctrlr->ctrlr_lock.
    4317             :  */
    4318             : static int
    4319           0 : nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
    4320             : {
    4321             :         uint64_t now;
    4322             :         struct nvme_request *req;
    4323             :         struct spdk_nvme_cmd *cmd;
    4324           0 :         int rc = 0;
    4325             : 
    4326           0 :         now = spdk_get_ticks();
    4327           0 :         if (now < ctrlr->next_keep_alive_tick) {
    4328           0 :                 return rc;
    4329             :         }
    4330             : 
    4331           0 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
    4332           0 :         if (req == NULL) {
    4333           0 :                 return rc;
    4334             :         }
    4335             : 
    4336           0 :         cmd = &req->cmd;
    4337           0 :         cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
    4338             : 
    4339           0 :         rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
    4340           0 :         if (rc != 0) {
    4341           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Submitting Keep Alive failed\n");
    4342           0 :                 rc = -ENXIO;
    4343             :         }
    4344             : 
    4345           0 :         ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
    4346           0 :         return rc;
    4347             : }
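
A worked pacing example (numbers assumed): if keep_alive_interval_ticks was
computed elsewhere as half of a 10-second KATO on a 2 GHz tick clock, then
next_keep_alive_tick advances by 5 * 2e9 = 1e10 ticks per submission, so at
most one Keep Alive command is sent per 5-second window no matter how often
the admin queue is polled.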
    4348             : 
    4349             : int32_t
    4350           1 : spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
    4351             : {
    4352             :         int32_t num_completions;
    4353             :         int32_t rc;
    4354             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4355             : 
    4356           1 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4357             : 
    4358           1 :         if (ctrlr->keep_alive_interval_ticks) {
    4359           0 :                 rc = nvme_ctrlr_keep_alive(ctrlr);
    4360           0 :                 if (rc) {
    4361           0 :                         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4362           0 :                         return rc;
    4363             :                 }
    4364             :         }
    4365             : 
    4366           1 :         rc = nvme_io_msg_process(ctrlr);
    4367           1 :         if (rc < 0) {
    4368           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4369           0 :                 return rc;
    4370             :         }
    4371           1 :         num_completions = rc;
    4372             : 
    4373           1 :         rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4374             : 
     4375             :         /* Each process has an async event list; complete the ones for this process object */
    4376           1 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4377           1 :         if (active_proc) {
    4378           0 :                 nvme_ctrlr_complete_queued_async_events(ctrlr);
    4379             :         }
    4380             : 
    4381           1 :         if (rc == -ENXIO && ctrlr->is_disconnecting) {
    4382           1 :                 nvme_ctrlr_disconnect_done(ctrlr);
    4383             :         }
    4384             : 
    4385           1 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4386             : 
    4387           1 :         if (rc < 0) {
    4388           1 :                 num_completions = rc;
    4389             :         } else {
    4390           0 :                 num_completions += rc;
    4391             :         }
    4392             : 
    4393           1 :         return num_completions;
    4394             : }
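
A hypothetical application-side poller sketch: calling this function
periodically from a single thread keeps Keep Alives flowing, drains admin
completions, and lets the disconnect path observe -ENXIO:

    static void
    example_admin_poller(struct spdk_nvme_ctrlr *ctrlr)
    {
            int32_t rc;

            rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
            if (rc < 0) {
                    /* e.g. -ENXIO once the admin qpair has disconnected */
            }
    }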
    4395             : 
    4396             : const struct spdk_nvme_ctrlr_data *
    4397           0 : spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
    4398             : {
    4399           0 :         return &ctrlr->cdata;
    4400             : }
    4401             : 
    4402           0 : union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
    4403             : {
    4404           0 :         union spdk_nvme_csts_register csts;
    4405             : 
    4406           0 :         if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
    4407           0 :                 csts.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4408             :         }
    4409           0 :         return csts;
    4410             : }
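
A short consumer sketch of the getter above (the reaction to a fatal state is
application-specific and assumed here):

    union spdk_nvme_csts_register csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr);

    if (csts.raw != SPDK_NVME_INVALID_REGISTER_VALUE && csts.bits.cfs) {
            /* Controller Fatal Status is set; a controller reset is warranted. */
    }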
    4411             : 
    4412           0 : union spdk_nvme_cc_register spdk_nvme_ctrlr_get_regs_cc(struct spdk_nvme_ctrlr *ctrlr)
    4413             : {
    4414           0 :         union spdk_nvme_cc_register cc;
    4415             : 
    4416           0 :         if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
    4417           0 :                 cc.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4418             :         }
    4419           0 :         return cc;
    4420             : }
    4421             : 
    4422           0 : union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
    4423             : {
    4424           0 :         return ctrlr->cap;
    4425             : }
    4426             : 
    4427           0 : union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
    4428             : {
    4429           0 :         return ctrlr->vs;
    4430             : }
    4431             : 
    4432           0 : union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
    4433             : {
    4434           0 :         union spdk_nvme_cmbsz_register cmbsz;
    4435             : 
    4436           0 :         if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
    4437           0 :                 cmbsz.raw = 0;
    4438             :         }
    4439             : 
    4440           0 :         return cmbsz;
    4441             : }
    4442             : 
    4443           0 : union spdk_nvme_pmrcap_register spdk_nvme_ctrlr_get_regs_pmrcap(struct spdk_nvme_ctrlr *ctrlr)
    4444             : {
    4445           0 :         union spdk_nvme_pmrcap_register pmrcap;
    4446             : 
    4447           0 :         if (nvme_ctrlr_get_pmrcap(ctrlr, &pmrcap)) {
    4448           0 :                 pmrcap.raw = 0;
    4449             :         }
    4450             : 
    4451           0 :         return pmrcap;
    4452             : }
    4453             : 
    4454           0 : union spdk_nvme_bpinfo_register spdk_nvme_ctrlr_get_regs_bpinfo(struct spdk_nvme_ctrlr *ctrlr)
    4455             : {
    4456           0 :         union spdk_nvme_bpinfo_register bpinfo;
    4457             : 
    4458           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    4459           0 :                 bpinfo.raw = 0;
    4460             :         }
    4461             : 
    4462           0 :         return bpinfo;
    4463             : }
    4464             : 
    4465             : uint64_t
    4466           0 : spdk_nvme_ctrlr_get_pmrsz(struct spdk_nvme_ctrlr *ctrlr)
    4467             : {
    4468           0 :         return ctrlr->pmr_size;
    4469             : }
    4470             : 
    4471             : uint32_t
    4472           2 : spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
    4473             : {
    4474           2 :         return ctrlr->cdata.nn;
    4475             : }
    4476             : 
    4477             : bool
    4478        9301 : spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4479             : {
    4480        9301 :         struct spdk_nvme_ns tmp, *ns;
    4481             : 
    4482        9301 :         tmp.id = nsid;
    4483        9301 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4484             : 
    4485        9301 :         if (ns != NULL) {
    4486        9209 :                 return ns->active;
    4487             :         }
    4488             : 
    4489          92 :         return false;
    4490             : }
    4491             : 
    4492             : uint32_t
    4493          35 : spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    4494             : {
    4495             :         struct spdk_nvme_ns *ns;
    4496             : 
    4497          35 :         ns = RB_MIN(nvme_ns_tree, &ctrlr->ns);
    4498          35 :         if (ns == NULL) {
    4499          10 :                 return 0;
    4500             :         }
    4501             : 
    4502        4618 :         while (ns != NULL) {
    4503        4615 :                 if (ns->active) {
    4504          22 :                         return ns->id;
    4505             :                 }
    4506             : 
    4507        4593 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4508             :         }
    4509             : 
    4510           3 :         return 0;
    4511             : }
    4512             : 
    4513             : uint32_t
    4514        4657 : spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    4515             : {
    4516        4657 :         struct spdk_nvme_ns tmp, *ns;
    4517             : 
    4518        4657 :         tmp.id = prev_nsid;
    4519        4657 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4520        4657 :         if (ns == NULL) {
    4521           5 :                 return 0;
    4522             :         }
    4523             : 
    4524        4652 :         ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4525        6184 :         while (ns != NULL) {
    4526        6164 :                 if (ns->active) {
    4527        4632 :                         return ns->id;
    4528             :                 }
    4529             : 
    4530        1532 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4531             :         }
    4532             : 
    4533          20 :         return 0;
    4534             : }
    4535             : 
    4536             : struct spdk_nvme_ns *
    4537       12403 : spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4538             : {
    4539       12403 :         struct spdk_nvme_ns tmp;
    4540             :         struct spdk_nvme_ns *ns;
    4541             : 
    4542       12403 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    4543          18 :                 return NULL;
    4544             :         }
    4545             : 
    4546       12385 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4547             : 
    4548       12385 :         tmp.id = nsid;
    4549       12385 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4550             : 
    4551       12385 :         if (ns == NULL) {
    4552        7687 :                 ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
    4553        7687 :                 if (ns == NULL) {
    4554           0 :                         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4555           0 :                         return NULL;
    4556             :                 }
    4557             : 
    4558        7687 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was added\n", nsid);
    4559        7687 :                 ns->id = nsid;
    4560        7687 :                 RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
    4561             :         }
    4562             : 
    4563       12385 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4564             : 
    4565       12385 :         return ns;
    4566             : }
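
The three helpers above compose into the standard walk over active
namespaces; a brief sketch:

    uint32_t nsid;
    struct spdk_nvme_ns *ns;

    for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
            ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
            if (ns == NULL) {
                    continue;
            }
            /* operate on the active namespace */
    }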
    4567             : 
    4568             : struct spdk_pci_device *
    4569           0 : spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
    4570             : {
    4571           0 :         if (ctrlr == NULL) {
    4572           0 :                 return NULL;
    4573             :         }
    4574             : 
    4575           0 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    4576           0 :                 return NULL;
    4577             :         }
    4578             : 
    4579           0 :         return nvme_ctrlr_proc_get_devhandle(ctrlr);
    4580             : }
    4581             : 
    4582             : uint32_t
    4583           0 : spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
    4584             : {
    4585           0 :         return ctrlr->max_xfer_size;
    4586             : }
    4587             : 
    4588             : uint16_t
    4589           0 : spdk_nvme_ctrlr_get_max_sges(const struct spdk_nvme_ctrlr *ctrlr)
    4590             : {
    4591           0 :         if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
    4592           0 :                 return ctrlr->max_sges;
    4593             :         } else {
    4594           0 :                 return UINT16_MAX;
    4595             :         }
    4596             : }
    4597             : 
    4598             : void
    4599           2 : spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
    4600             :                                       spdk_nvme_aer_cb aer_cb_fn,
    4601             :                                       void *aer_cb_arg)
    4602             : {
    4603             :         struct spdk_nvme_ctrlr_process *active_proc;
    4604             : 
    4605           2 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4606             : 
    4607           2 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4608           2 :         if (active_proc) {
    4609           2 :                 active_proc->aer_cb_fn = aer_cb_fn;
    4610           2 :                 active_proc->aer_cb_arg = aer_cb_arg;
    4611             :         }
    4612             : 
    4613           2 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4614           2 : }
    4615             : 
    4616             : void
    4617           0 : spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page(struct spdk_nvme_ctrlr *ctrlr)
    4618             : {
    4619           0 :         ctrlr->opts.disable_read_changed_ns_list_log_page = true;
    4620           0 : }
    4621             : 
    4622             : void
    4623           0 : spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
    4624             :                 uint64_t timeout_io_us, uint64_t timeout_admin_us,
    4625             :                 spdk_nvme_timeout_cb cb_fn, void *cb_arg)
    4626             : {
    4627             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4628             : 
    4629           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4630             : 
    4631           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4632           0 :         if (active_proc) {
    4633           0 :                 active_proc->timeout_io_ticks = timeout_io_us * spdk_get_ticks_hz() / 1000000ULL;
    4634           0 :                 active_proc->timeout_admin_ticks = timeout_admin_us * spdk_get_ticks_hz() / 1000000ULL;
    4635           0 :                 active_proc->timeout_cb_fn = cb_fn;
    4636           0 :                 active_proc->timeout_cb_arg = cb_arg;
    4637             :         }
    4638             : 
    4639           0 :         ctrlr->timeout_enabled = true;
    4640             : 
    4641           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4642           0 : }
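
A hypothetical registration sketch; the callback signature follows
spdk_nvme_timeout_cb, qpair is NULL when the timed-out command was an admin
command, and the 5 s / 10 s values are arbitrary:

    static void
    example_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
                       struct spdk_nvme_qpair *qpair, uint16_t cid)
    {
            /* A common reaction is to try aborting the stuck command. */
            spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, NULL, NULL);
    }

    static void
    example_enable_timeouts(struct spdk_nvme_ctrlr *ctrlr)
    {
            spdk_nvme_ctrlr_register_timeout_callback(ctrlr, 5 * 1000 * 1000,
                            10 * 1000 * 1000, example_timeout_cb, NULL);
    }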
    4643             : 
    4644             : bool
    4645           8 : spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
    4646             : {
    4647             :         /* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
    4648             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
    4649           8 :         return ctrlr->log_page_supported[log_page];
    4650             : }
    4651             : 
    4652             : bool
    4653           4 : spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
    4654             : {
    4655             :         /* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
    4656             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
    4657           4 :         return ctrlr->feature_supported[feature_code];
    4658             : }
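
Both lookups are simple table reads; a caller sketch (the log page and
feature IDs are chosen only for illustration):

    if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION)) {
            /* safe to request the SMART / health information log page */
    }
    if (spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD)) {
            /* safe to get/set the temperature threshold feature */
    }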
    4659             : 
    4660             : int
    4661           1 : spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4662             :                           struct spdk_nvme_ctrlr_list *payload)
    4663             : {
    4664             :         struct nvme_completion_poll_status      *status;
    4665             :         struct spdk_nvme_ns                     *ns;
    4666             :         int                                     res;
    4667             : 
    4668           1 :         if (nsid == 0) {
    4669           0 :                 return -EINVAL;
    4670             :         }
    4671             : 
    4672           1 :         status = calloc(1, sizeof(*status));
    4673           1 :         if (!status) {
    4674           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4675           0 :                 return -ENOMEM;
    4676             :         }
    4677             : 
    4678           1 :         res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
    4679             :                                        nvme_completion_poll_cb, status);
    4680           1 :         if (res) {
    4681           0 :                 free(status);
    4682           0 :                 return res;
    4683             :         }
    4684           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4685           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
    4686           0 :                 if (!status->timed_out) {
    4687           0 :                         free(status);
    4688             :                 }
    4689           0 :                 return -ENXIO;
    4690             :         }
    4691           1 :         free(status);
    4692             : 
    4693           1 :         res = nvme_ctrlr_identify_active_ns(ctrlr);
    4694           1 :         if (res) {
    4695           0 :                 return res;
    4696             :         }
    4697             : 
    4698           1 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    4699           1 :         if (ns == NULL) {
    4700           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_get_ns failed!\n");
    4701           0 :                 return -ENXIO;
    4702             :         }
    4703             : 
    4704           1 :         return nvme_ns_construct(ns, nsid, ctrlr);
    4705             : }
    4706             : 
    4707             : int
    4708           1 : spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4709             :                           struct spdk_nvme_ctrlr_list *payload)
    4710             : {
    4711             :         struct nvme_completion_poll_status      *status;
    4712             :         int                                     res;
    4713             : 
    4714           1 :         if (nsid == 0) {
    4715           0 :                 return -EINVAL;
    4716             :         }
    4717             : 
    4718           1 :         status = calloc(1, sizeof(*status));
    4719           1 :         if (!status) {
    4720           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4721           0 :                 return -ENOMEM;
    4722             :         }
    4723             : 
    4724           1 :         res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
    4725             :                                        nvme_completion_poll_cb, status);
    4726           1 :         if (res) {
    4727           0 :                 free(status);
    4728           0 :                 return res;
    4729             :         }
    4730           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4731           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
    4732           0 :                 if (!status->timed_out) {
    4733           0 :                         free(status);
    4734             :                 }
    4735           0 :                 return -ENXIO;
    4736             :         }
    4737           1 :         free(status);
    4738             : 
    4739           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4740             : }
    4741             : 
    4742             : uint32_t
    4743           1 : spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
    4744             : {
    4745             :         struct nvme_completion_poll_status      *status;
    4746             :         int                                     res;
    4747             :         uint32_t                                nsid;
    4748             : 
    4749           1 :         status = calloc(1, sizeof(*status));
    4750           1 :         if (!status) {
    4751           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4752           0 :                 return 0;
    4753             :         }
    4754             : 
    4755           1 :         res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
    4756           1 :         if (res) {
    4757           0 :                 free(status);
    4758           0 :                 return 0;
    4759             :         }
    4760           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4761           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
    4762           0 :                 if (!status->timed_out) {
    4763           0 :                         free(status);
    4764             :                 }
    4765           0 :                 return 0;
    4766             :         }
    4767             : 
    4768           1 :         nsid = status->cpl.cdw0;
    4769           1 :         free(status);
    4770             : 
    4771           1 :         assert(nsid > 0);
    4772             : 
    4773             :         /* Return the namespace ID that was created */
    4774           1 :         return nsid;
    4775             : }
    4776             : 
    4777             : int
    4778           1 : spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4779             : {
    4780             :         struct nvme_completion_poll_status      *status;
    4781             :         int                                     res;
    4782             : 
    4783           1 :         if (nsid == 0) {
    4784           0 :                 return -EINVAL;
    4785             :         }
    4786             : 
    4787           1 :         status = calloc(1, sizeof(*status));
    4788           1 :         if (!status) {
    4789           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4790           0 :                 return -ENOMEM;
    4791             :         }
    4792             : 
    4793           1 :         res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
    4794           1 :         if (res) {
    4795           0 :                 free(status);
    4796           0 :                 return res;
    4797             :         }
    4798           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4799           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
    4800           0 :                 if (!status->timed_out) {
    4801           0 :                         free(status);
    4802             :                 }
    4803           0 :                 return -ENXIO;
    4804             :         }
    4805           1 :         free(status);
    4806             : 
    4807           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4808             : }
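
An illustrative management flow (field values assumed) tying the helpers
above together: create a namespace, then attach it to this controller so the
active-namespace list picks it up:

    static uint32_t
    example_create_and_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint64_t num_blocks)
    {
            struct spdk_nvme_ns_data nsdata = {};
            struct spdk_nvme_ctrlr_list ctrlr_list = {};
            uint32_t nsid;

            nsdata.nsze = num_blocks;   /* namespace size, in blocks */
            nsdata.ncap = num_blocks;   /* namespace capacity, in blocks */

            nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &nsdata);
            if (nsid == 0) {
                    return 0;
            }

            ctrlr_list.ctrlr_count = 1;
            ctrlr_list.ctrlr_list[0] = ctrlr->cdata.cntlid;
            if (spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list) != 0) {
                    return 0;
            }

            return nsid;
    }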
    4809             : 
    4810             : int
    4811           0 : spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4812             :                        struct spdk_nvme_format *format)
    4813             : {
    4814             :         struct nvme_completion_poll_status      *status;
    4815             :         int                                     res;
    4816             : 
    4817           0 :         status = calloc(1, sizeof(*status));
    4818           0 :         if (!status) {
    4819           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4820           0 :                 return -ENOMEM;
    4821             :         }
    4822             : 
    4823           0 :         res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
    4824             :                                     status);
    4825           0 :         if (res) {
    4826           0 :                 free(status);
    4827           0 :                 return res;
    4828             :         }
    4829           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4830           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
    4831           0 :                 if (!status->timed_out) {
    4832           0 :                         free(status);
    4833             :                 }
    4834           0 :                 return -ENXIO;
    4835             :         }
    4836           0 :         free(status);
    4837             : 
    4838           0 :         return spdk_nvme_ctrlr_reset(ctrlr);
    4839             : }
    4840             : 
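A format sketch under stated assumptions: SPDK_NVME_GLOBAL_NS_TAG and the secure-erase setting come from spdk/nvme.h and spdk/nvme_spec.h, and the helper name is hypothetical. Per the listing, a successful format ends in spdk_nvme_ctrlr_reset(), so callers must be prepared to reconnect I/O qpairs afterwards.

#include "spdk/nvme.h"

/* Hypothetical helper: user-data erase of every namespace using LBA format 0.
 * On success, spdk_nvme_ctrlr_format() resets the controller before returning. */
static int
example_format_all(struct spdk_nvme_ctrlr *ctrlr)
{
        struct spdk_nvme_format fmt = {0};

        fmt.lbaf = 0;                                    /* LBA format #0 */
        fmt.ses = SPDK_NVME_FMT_NVM_SES_USER_DATA_ERASE; /* secure erase setting */

        return spdk_nvme_ctrlr_format(ctrlr, SPDK_NVME_GLOBAL_NS_TAG, &fmt);
}
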
    4841             : int
    4842           8 : spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
    4843             :                                 int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
    4844             : {
    4845           8 :         struct spdk_nvme_fw_commit              fw_commit;
    4846             :         struct nvme_completion_poll_status      *status;
    4847             :         int                                     res;
    4848             :         unsigned int                            size_remaining;
    4849             :         unsigned int                            offset;
    4850             :         unsigned int                            transfer;
    4851             :         uint8_t                                 *p;
    4852             : 
    4853           8 :         if (!completion_status) {
    4854           0 :                 return -EINVAL;
    4855             :         }
    4856           8 :         memset(completion_status, 0, sizeof(struct spdk_nvme_status));
    4857           8 :         if (size % 4) {
    4858           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid size!\n");
    4859           1 :                 return -1;
    4860             :         }
    4861             : 
    4862             :         /* Currently only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
    4863             :          * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
    4864             :          */
    4865           7 :         if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
    4866             :             (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
    4867           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid command!\n");
    4868           0 :                 return -1;
    4869             :         }
    4870             : 
    4871           7 :         status = calloc(1, sizeof(*status));
    4872           7 :         if (!status) {
    4873           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4874           0 :                 return -ENOMEM;
    4875             :         }
    4876             : 
    4877             :         /* Firmware download */
    4878           7 :         size_remaining = size;
    4879           7 :         offset = 0;
    4880           7 :         p = payload;
    4881             : 
    4882          10 :         while (size_remaining > 0) {
    4883           7 :                 transfer = spdk_min(size_remaining, ctrlr->min_page_size);
    4884             : 
    4885           7 :                 memset(status, 0, sizeof(*status));
    4886           7 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
    4887             :                                                        nvme_completion_poll_cb,
    4888             :                                                        status);
    4889           7 :                 if (res) {
    4890           2 :                         free(status);
    4891           2 :                         return res;
    4892             :                 }
    4893             : 
    4894           5 :                 if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4895           2 :                         NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
    4896           2 :                         if (!status->timed_out) {
    4897           1 :                                 free(status);
    4898             :                         }
    4899           2 :                         return -ENXIO;
    4900             :                 }
    4901           3 :                 p += transfer;
    4902           3 :                 offset += transfer;
    4903           3 :                 size_remaining -= transfer;
    4904             :         }
    4905             : 
    4906             :         /* Firmware commit */
    4907           3 :         memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    4908           3 :         fw_commit.fs = slot;
    4909           3 :         fw_commit.ca = commit_action;
    4910             : 
    4911           3 :         memset(status, 0, sizeof(*status));
    4912           3 :         res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
    4913             :                                        status);
    4914           3 :         if (res) {
    4915           1 :                 free(status);
    4916           1 :                 return res;
    4917             :         }
    4918             : 
    4919           2 :         res = nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
    4920             : 
    4921           2 :         memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
    4922             : 
    4923           2 :         if (!status->timed_out) {
    4924           2 :                 free(status);
    4925             :         }
    4926             : 
    4927           2 :         if (res) {
    4928           1 :                 if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
    4929           0 :                     completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
    4930           1 :                         if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
    4931           0 :                             completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
    4932           0 :                                 NVME_CTRLR_NOTICELOG(ctrlr,
    4933             :                                                      "firmware activation requires a conventional reset to be performed!\n");
    4934             :                         } else {
    4935           1 :                                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    4936             :                         }
    4937           1 :                         return -ENXIO;
    4938             :                 }
    4939             :         }
    4940             : 
    4941           1 :         return spdk_nvme_ctrlr_reset(ctrlr);
    4942             : }
    4943             : 
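A firmware-update caller sketch. The image buffer is assumed to be DMA-safe (e.g. from spdk_zmalloc) and a multiple of four bytes, since the function rejects other sizes up front; slot 0 lets the controller choose a firmware slot. The helper name is hypothetical.

#include <stdio.h>
#include "spdk/nvme.h"

/* Hypothetical helper: download `img` and activate it immediately. */
static int
example_update_fw(struct spdk_nvme_ctrlr *ctrlr, void *img, uint32_t size)
{
        struct spdk_nvme_status st;
        int rc;

        rc = spdk_nvme_ctrlr_update_firmware(ctrlr, img, size, 0 /* controller picks slot */,
                                             SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG, &st);
        if (rc != 0) {
                fprintf(stderr, "fw update failed: rc=%d sct=%u sc=%u\n",
                        rc, (unsigned)st.sct, (unsigned)st.sc);
        }
        return rc;
}
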
    4944             : int
    4945           0 : spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
    4946             : {
    4947             :         int rc, size;
    4948             :         union spdk_nvme_cmbsz_register cmbsz;
    4949             : 
    4950           0 :         cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
    4951             : 
    4952           0 :         if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
    4953           0 :                 return -ENOTSUP;
    4954             :         }
    4955             : 
    4956           0 :         size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
    4957             : 
    4958           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4959           0 :         rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
    4960           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4961             : 
    4962           0 :         if (rc < 0) {
    4963           0 :                 return rc;
    4964             :         }
    4965             : 
    4966           0 :         return size;
    4967             : }
    4968             : 
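To make the size arithmetic above concrete, a worked decoding with illustrative register values (not from any particular device):

/* CMBSZ decoding example: SZU selects the granularity as 4 KiB << (4 * SZU),
 * and SZ counts units of that granularity.  With szu == 1 (64 KiB units)
 * and sz == 16:
 *
 *     size = 16 * (0x1000 << (1 * 4)) = 16 * 65536 bytes = 1 MiB
 */
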
    4969             : void *
    4970           0 : spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    4971             : {
    4972             :         void *buf;
    4973             : 
    4974           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4975           0 :         buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
    4976           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4977             : 
    4978           0 :         return buf;
    4979             : }
    4980             : 
    4981             : void
    4982           0 : spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
    4983             : {
    4984           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4985           0 :         nvme_transport_ctrlr_unmap_cmb(ctrlr);
    4986           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4987           0 : }
    4988             : 
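The three CMB entry points above compose as follows; a minimal sketch with a hypothetical helper name and abbreviated error handling.

#include <errno.h>
#include "spdk/nvme.h"

/* Hypothetical helper: reserve the controller memory buffer, map it,
 * then tear everything back down. */
static int
example_use_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
        size_t len = 0;
        void *cmb;
        int rc = spdk_nvme_ctrlr_reserve_cmb(ctrlr);

        if (rc < 0) {
                return rc;      /* -ENOTSUP if CMBSZ reports no read/write data support */
        }

        cmb = spdk_nvme_ctrlr_map_cmb(ctrlr, &len);
        if (cmb == NULL) {
                return -ENXIO;
        }

        /* ... place data buffers inside cmb[0..len) ... */

        spdk_nvme_ctrlr_unmap_cmb(ctrlr);
        return 0;
}
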
    4989             : int
    4990           0 : spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    4991             : {
    4992             :         int rc;
    4993             : 
    4994           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    4995           0 :         rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
    4996           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    4997             : 
    4998           0 :         return rc;
    4999             : }
    5000             : 
    5001             : int
    5002           0 : spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5003             : {
    5004             :         int rc;
    5005             : 
    5006           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5007           0 :         rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
    5008           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5009             : 
    5010           0 :         return rc;
    5011             : }
    5012             : 
    5013             : void *
    5014           0 : spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5015             : {
    5016             :         void *buf;
    5017             : 
    5018           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5019           0 :         buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
    5020           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5021             : 
    5022           0 :         return buf;
    5023             : }
    5024             : 
    5025             : int
    5026           0 : spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5027             : {
    5028             :         int rc;
    5029             : 
    5030           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5031           0 :         rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
    5032           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5033             : 
    5034           0 :         return rc;
    5035             : }
    5036             : 
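The PMR entry points follow the same enable/map/unmap/disable shape; a minimal sketch, again with a hypothetical helper name.

#include <errno.h>
#include "spdk/nvme.h"

/* Hypothetical helper: bring the persistent memory region up, use it, tear it down. */
static int
example_use_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
        size_t len = 0;
        void *pmr;
        int rc = spdk_nvme_ctrlr_enable_pmr(ctrlr);

        if (rc != 0) {
                return rc;
        }

        pmr = spdk_nvme_ctrlr_map_pmr(ctrlr, &len);
        if (pmr == NULL) {
                spdk_nvme_ctrlr_disable_pmr(ctrlr);
                return -ENXIO;
        }

        /* ... persistent writes into pmr[0..len) ... */

        rc = spdk_nvme_ctrlr_unmap_pmr(ctrlr);
        if (rc == 0) {
                rc = spdk_nvme_ctrlr_disable_pmr(ctrlr);
        }
        return rc;
}
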
    5037             : int
    5038           0 : spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *payload,
    5039             :                 uint32_t bprsz, uint32_t bprof, uint32_t bpid)
    5040             : {
    5041           0 :         union spdk_nvme_bprsel_register bprsel;
    5042           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5043           0 :         uint64_t bpmbl, bpmb_size;
    5044             : 
    5045           0 :         if (ctrlr->cap.bits.bps == 0) {
    5046           0 :                 return -ENOTSUP;
    5047             :         }
    5048             : 
    5049           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5050           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5051           0 :                 return -EIO;
    5052             :         }
    5053             : 
    5054           0 :         if (bpinfo.bits.brs == SPDK_NVME_BRS_READ_IN_PROGRESS) {
    5055           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read already initiated\n");
    5056           0 :                 return -EALREADY;
    5057             :         }
    5058             : 
    5059           0 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5060             : 
    5061           0 :         bpmb_size = bprsz * 4096;
    5062           0 :         bpmbl = spdk_vtophys(payload, &bpmb_size);
    5063           0 :         if (bpmbl == SPDK_VTOPHYS_ERROR) {
    5064           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of bpmbl failed\n");
    5065           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5066           0 :                 return -EFAULT;
    5067             :         }
    5068             : 
    5069           0 :         if (bpmb_size != bprsz * 4096) {
    5070           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
    5071           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5072           0 :                 return -EFAULT;
    5073             :         }
    5074             : 
    5075           0 :         if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
    5076           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
    5077           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5078           0 :                 return -EIO;
    5079             :         }
    5080             : 
    5081           0 :         bprsel.bits.bpid = bpid;
    5082           0 :         bprsel.bits.bprof = bprof;
    5083           0 :         bprsel.bits.bprsz = bprsz;
    5084             : 
    5085           0 :         if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
    5086           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
    5087           0 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5088           0 :                 return -EIO;
    5089             :         }
    5090             : 
    5091           0 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5092           0 :         return 0;
    5093             : }
    5094             : 
    5095             : int
    5096           0 : spdk_nvme_ctrlr_read_boot_partition_poll(struct spdk_nvme_ctrlr *ctrlr)
    5097             : {
    5098           0 :         int rc = 0;
    5099           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5100             : 
    5101           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5102           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5103           0 :                 return -EIO;
    5104             :         }
    5105             : 
    5106           0 :         switch (bpinfo.bits.brs) {
    5107           0 :         case SPDK_NVME_BRS_NO_READ:
    5108           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read not initiated\n");
    5109           0 :                 rc = -EINVAL;
    5110           0 :                 break;
    5111           0 :         case SPDK_NVME_BRS_READ_IN_PROGRESS:
    5112           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition read in progress\n");
    5113           0 :                 rc = -EAGAIN;
    5114           0 :                 break;
    5115           0 :         case SPDK_NVME_BRS_READ_ERROR:
    5116           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Error completing Boot Partition read\n");
    5117           0 :                 rc = -EIO;
    5118           0 :                 break;
    5119           0 :         case SPDK_NVME_BRS_READ_SUCCESS:
    5120           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Boot Partition read completed successfully\n");
    5121           0 :                 break;
    5122           0 :         default:
    5123           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition read status\n");
    5124           0 :                 rc = -EINVAL;
    5125             :         }
    5126             : 
    5127           0 :         return rc;
    5128             : }
    5129             : 
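Start and poll pair up like this. bprsz is in 4 KiB units (the listing multiplies by 4096), and the buffer must be physically contiguous, so a DMA allocation via spdk_zmalloc() is assumed; the helper name is hypothetical.

#include <errno.h>
#include "spdk/env.h"
#include "spdk/nvme.h"

/* Hypothetical helper: synchronously read 32 KiB of boot partition `bpid`. */
static int
example_read_boot_partition(struct spdk_nvme_ctrlr *ctrlr, uint32_t bpid)
{
        uint32_t bprsz = 8;     /* 8 * 4 KiB = 32 KiB */
        void *buf = spdk_zmalloc(bprsz * 4096, 0x1000, NULL,
                                 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
        int rc;

        if (buf == NULL) {
                return -ENOMEM;
        }

        rc = spdk_nvme_ctrlr_read_boot_partition_start(ctrlr, buf, bprsz, 0 /* bprof */, bpid);
        while (rc == 0) {
                rc = spdk_nvme_ctrlr_read_boot_partition_poll(ctrlr);
                if (rc != -EAGAIN) {
                        break;  /* 0 on success, other negative values on error */
                }
                rc = 0;         /* read still in progress; poll again */
        }

        /* ... consume buf on success ... */
        spdk_free(buf);
        return rc;
}
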
    5130             : static void
    5131           0 : nvme_write_boot_partition_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    5132             : {
    5133             :         int res;
    5134           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    5135           0 :         struct spdk_nvme_fw_commit fw_commit;
    5136           0 :         struct spdk_nvme_cpl err_cpl =
    5137             :         {.status = {.sct = SPDK_NVME_SCT_GENERIC, .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR }};
    5138             : 
    5139           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    5140           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Write Boot Partition failed\n");
    5141           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5142           0 :                 return;
    5143             :         }
    5144             : 
    5145           0 :         if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING) {
    5146           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Downloading at Offset %d Success\n", ctrlr->fw_offset);
    5147           0 :                 ctrlr->fw_payload = (uint8_t *)ctrlr->fw_payload + ctrlr->fw_transfer_size;
    5148           0 :                 ctrlr->fw_offset += ctrlr->fw_transfer_size;
    5149           0 :                 ctrlr->fw_size_remaining -= ctrlr->fw_transfer_size;
    5150           0 :                 ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5151           0 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5152             :                                                        ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5153           0 :                 if (res) {
    5154           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_image_download failed!\n");
    5155           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5156           0 :                         return;
    5157             :                 }
    5158             : 
    5159           0 :                 if (ctrlr->fw_transfer_size < ctrlr->min_page_size) {
    5160           0 :                         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADED;
    5161             :                 }
    5162           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADED) {
    5163           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Download Success\n");
    5164           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5165           0 :                 fw_commit.bpid = ctrlr->bpid;
    5166           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_BOOT_PARTITION;
    5167           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5168             :                                                nvme_write_boot_partition_cb, ctrlr);
    5169           0 :                 if (res) {
    5170           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5171           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5172           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5173           0 :                         return;
    5174             :                 }
    5175             : 
    5176           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_REPLACE;
    5177           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_REPLACE) {
    5178           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Replacement Success\n");
    5179           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5180           0 :                 fw_commit.bpid = ctrlr->bpid;
    5181           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_ACTIVATE_BOOT_PARTITION;
    5182           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5183             :                                                nvme_write_boot_partition_cb, ctrlr);
    5184           0 :                 if (res) {
    5185           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5186           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5187           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5188           0 :                         return;
    5189             :                 }
    5190             : 
    5191           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_ACTIVATE;
    5192           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_ACTIVATE) {
    5193           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Activation Success\n");
    5194           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5195             :         } else {
    5196           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition write state\n");
    5197           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5198           0 :                 return;
    5199             :         }
    5200             : }
    5201             : 
    5202             : int
    5203           0 : spdk_nvme_ctrlr_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
    5204             :                                      void *payload, uint32_t size, uint32_t bpid,
    5205             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    5206             : {
    5207             :         int res;
    5208             : 
    5209           0 :         if (ctrlr->cap.bits.bps == 0) {
    5210           0 :                 return -ENOTSUP;
    5211             :         }
    5212             : 
    5213           0 :         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADING;
    5214           0 :         ctrlr->bpid = bpid;
    5215           0 :         ctrlr->bp_write_cb_fn = cb_fn;
    5216           0 :         ctrlr->bp_write_cb_arg = cb_arg;
    5217           0 :         ctrlr->fw_offset = 0;
    5218           0 :         ctrlr->fw_size_remaining = size;
    5219           0 :         ctrlr->fw_payload = payload;
    5220           0 :         ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5221             : 
    5222           0 :         res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5223             :                                                ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5224             : 
    5225           0 :         return res;
    5226             : }
    5227             : 
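The write side is asynchronous: the callback above walks the state machine DOWNLOADING -> DOWNLOADED -> REPLACE -> ACTIVATE, and the caller's callback fires once activation completes (or on the first error). A polling caller sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include "spdk/nvme.h"

static void
example_bp_write_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
        bool *done = arg;

        if (spdk_nvme_cpl_is_error(cpl)) {
                fprintf(stderr, "boot partition write failed\n");
        }
        *done = true;
}

/* Hypothetical helper: write `img` to boot partition 0 and wait for the
 * download/replace/activate sequence to finish. */
static int
example_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr, void *img, uint32_t size)
{
        bool done = false;
        int rc = spdk_nvme_ctrlr_write_boot_partition(ctrlr, img, size, 0 /* bpid */,
                                                      example_bp_write_done, &done);

        if (rc != 0) {
                return rc;
        }
        while (!done) {
                spdk_nvme_ctrlr_process_admin_completions(ctrlr);
        }
        return 0;
}
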
    5228             : bool
    5229          43 : spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
    5230             : {
    5231          43 :         assert(ctrlr);
    5232             : 
    5233          43 :         return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
    5234             :                         strlen(SPDK_NVMF_DISCOVERY_NQN));
    5235             : }
    5236             : 
    5237             : bool
    5238          20 : spdk_nvme_ctrlr_is_fabrics(struct spdk_nvme_ctrlr *ctrlr)
    5239             : {
    5240          20 :         assert(ctrlr);
    5241             : 
    5242          20 :         return spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype);
    5243             : }
    5244             : 
    5245             : int
    5246           0 : spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5247             :                                  uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5248             : {
    5249             :         struct nvme_completion_poll_status      *status;
    5250             :         int                                     res;
    5251             : 
    5252           0 :         status = calloc(1, sizeof(*status));
    5253           0 :         if (!status) {
    5254           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5255           0 :                 return -ENOMEM;
    5256             :         }
    5257             : 
    5258           0 :         res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
    5259             :                         nvme_completion_poll_cb, status);
    5260           0 :         if (res) {
    5261           0 :                 free(status);
    5262           0 :                 return res;
    5263             :         }
    5264           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5265           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_receive failed!\n");
    5266           0 :                 if (!status->timed_out) {
    5267           0 :                         free(status);
    5268             :                 }
    5269           0 :                 return -ENXIO;
    5270             :         }
    5271           0 :         free(status);
    5272             : 
    5273           0 :         return 0;
    5274             : }
    5275             : 
    5276             : int
    5277           0 : spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5278             :                               uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5279             : {
    5280             :         struct nvme_completion_poll_status      *status;
    5281             :         int                                     res;
    5282             : 
    5283           0 :         status = calloc(1, sizeof(*status));
    5284           0 :         if (!status) {
    5285           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5286           0 :                 return -ENOMEM;
    5287             :         }
    5288             : 
    5289           0 :         res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
    5290             :                                                 nvme_completion_poll_cb,
    5291             :                                                 status);
    5292           0 :         if (res) {
    5293           0 :                 free(status);
    5294           0 :                 return res;
    5295             :         }
    5296           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5297           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_send failed!\n");
    5298           0 :                 if (!status->timed_out) {
    5299           0 :                         free(status);
    5300             :                 }
    5301           0 :                 return -ENXIO;
    5302             :         }
    5303             : 
    5304           0 :         free(status);
    5305             : 
    5306           0 :         return 0;
    5307             : }
    5308             : 
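Both security helpers block on the admin queue. A receive-side sketch, assuming the TCG Opal level-0 discovery encoding (security protocol 0x01, SP specific 0x0001) and a DMA-safe buffer; the helper name is hypothetical.

#include "spdk/nvme.h"

/* Hypothetical helper: fetch TCG Opal level-0 discovery data into `buf`,
 * a DMA-capable buffer of `len` bytes. */
static int
example_opal_discovery0(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t len)
{
        return spdk_nvme_ctrlr_security_receive(ctrlr, 0x01 /* secp */,
                                                0x0001 /* spsp */, 0 /* nssf */,
                                                buf, len);
}
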
    5309             : uint64_t
    5310           1 : spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
    5311             : {
    5312           1 :         return ctrlr->flags;
    5313             : }
    5314             : 
    5315             : const struct spdk_nvme_transport_id *
    5316           0 : spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
    5317             : {
    5318           0 :         return &ctrlr->trid;
    5319             : }
    5320             : 
    5321             : int32_t
    5322          17 : spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
    5323             : {
    5324             :         uint32_t qid;
    5325             : 
    5326          17 :         assert(ctrlr->free_io_qids);
    5327          17 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5328          17 :         qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
    5329          17 :         if (qid > ctrlr->opts.num_io_queues) {
    5330           2 :                 NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
    5331           2 :                 nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5332           2 :                 return -1;
    5333             :         }
    5334             : 
    5335          15 :         spdk_bit_array_clear(ctrlr->free_io_qids, qid);
    5336          15 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5337          15 :         return qid;
    5338             : }
    5339             : 
    5340             : void
    5341          64 : spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
    5342             : {
    5343          64 :         assert(qid <= ctrlr->opts.num_io_queues);
    5344             : 
    5345          64 :         nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
    5346             : 
    5347          64 :         if (spdk_likely(ctrlr->free_io_qids)) {
    5348          64 :                 spdk_bit_array_set(ctrlr->free_io_qids, qid);
    5349             :         }
    5350             : 
    5351          64 :         nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
    5352          64 : }
    5353             : 
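The qid allocator above backs externally managed queue pairs; a round-trip sketch with a hypothetical helper name.

#include "spdk/nvme.h"

/* Hypothetical helper: grab a free I/O queue ID, use it, hand it back. */
static void
example_qid_roundtrip(struct spdk_nvme_ctrlr *ctrlr)
{
        int32_t qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);

        if (qid < 0) {
                return;         /* all queue IDs in use */
        }

        /* ... create an externally managed queue pair with this qid ... */

        spdk_nvme_ctrlr_free_qid(ctrlr, (uint16_t)qid);
}
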
    5354             : int
    5355           2 : spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
    5356             :                                    struct spdk_memory_domain **domains, int array_size)
    5357             : {
    5358           2 :         return nvme_transport_ctrlr_get_memory_domains(ctrlr, domains, array_size);
    5359             : }

Generated by: LCOV version 1.15