Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2015 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #ifndef __NVME_INTERNAL_H__
8 : #define __NVME_INTERNAL_H__
9 :
10 : #include "spdk/config.h"
11 : #include "spdk/likely.h"
12 : #include "spdk/stdinc.h"
13 :
14 : #include "spdk/nvme.h"
15 :
16 : #if defined(__i386__) || defined(__x86_64__)
17 : #include <x86intrin.h>
18 : #endif
19 :
20 : #include "spdk/queue.h"
21 : #include "spdk/barrier.h"
22 : #include "spdk/bit_array.h"
23 : #include "spdk/mmio.h"
24 : #include "spdk/pci_ids.h"
25 : #include "spdk/util.h"
26 : #include "spdk/memory.h"
27 : #include "spdk/nvme_intel.h"
28 : #include "spdk/nvmf_spec.h"
29 : #include "spdk/tree.h"
30 : #include "spdk/uuid.h"
31 :
32 : #include "spdk_internal/assert.h"
33 : #include "spdk/log.h"
34 :
35 : extern pid_t g_spdk_nvme_pid;
36 :
37 : extern struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts;
38 :
39 : /*
40 : * Some Intel devices support vendor-unique read latency log page even
41 : * though the log page directory says otherwise.
42 : */
43 : #define NVME_INTEL_QUIRK_READ_LATENCY 0x1
44 :
45 : /*
46 : * Some Intel devices support vendor-unique write latency log page even
47 : * though the log page directory says otherwise.
48 : */
49 : #define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
50 :
51 : /*
 * The controller needs a delay before it starts checking the device
53 : * readiness, which is done by reading the NVME_CSTS_RDY bit.
54 : */
55 : #define NVME_QUIRK_DELAY_BEFORE_CHK_RDY 0x4
56 :
57 : /*
58 : * The controller performs best when I/O is split on particular
59 : * LBA boundaries.
60 : */
61 : #define NVME_INTEL_QUIRK_STRIPING 0x8
62 :
63 : /*
64 : * The controller needs a delay after allocating an I/O queue pair
65 : * before it is ready to accept I/O commands.
66 : */
67 : #define NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC 0x10
68 :
69 : /*
70 : * Earlier NVMe devices do not indicate whether unmapped blocks
71 : * will read all zeroes or not. This define indicates that the
72 : * device does in fact read all zeroes after an unmap event
73 : */
74 : #define NVME_QUIRK_READ_ZERO_AFTER_DEALLOCATE 0x20
75 :
76 : /*
 * The controller doesn't handle Identify CNS values other than 0 or 1 correctly.
78 : */
79 : #define NVME_QUIRK_IDENTIFY_CNS 0x40
80 :
81 : /*
82 : * The controller supports Open Channel command set if matching additional
83 : * condition, like the first byte (value 0x1) in the vendor specific
84 : * bits of the namespace identify structure is set.
85 : */
86 : #define NVME_QUIRK_OCSSD 0x80
87 :
88 : /*
89 : * The controller has an Intel vendor ID but does not support Intel vendor-specific
90 : * log pages. This is primarily for QEMU emulated SSDs which report an Intel vendor
91 : * ID but do not support these log pages.
92 : */
93 : #define NVME_INTEL_QUIRK_NO_LOG_PAGES 0x100
94 :
95 : /*
96 : * The controller does not set SHST_COMPLETE in a reasonable amount of time. This
97 : * is primarily seen in virtual VMWare NVMe SSDs. This quirk merely adds an additional
98 : * error message that on VMWare NVMe SSDs, the shutdown timeout may be expected.
99 : */
100 : #define NVME_QUIRK_SHST_COMPLETE 0x200
101 :
102 : /*
103 : * The controller requires an extra delay before starting the initialization process
104 : * during attach.
105 : */
106 : #define NVME_QUIRK_DELAY_BEFORE_INIT 0x400
107 :
108 : /*
109 : * Some SSDs exhibit poor performance with the default SPDK NVMe IO queue size.
110 : * This quirk will increase the default to 1024 which matches other operating
111 : * systems, at the cost of some extra memory usage. Users can still override
112 : * the increased default by changing the spdk_nvme_io_qpair_opts when allocating
113 : * a new queue pair.
114 : */
115 : #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
116 :
117 : /**
118 : * The maximum access width to PCI memory space is 8 Bytes, don't use AVX2 or
119 : * SSE instructions to optimize the memory access(memcpy or memset) larger than
120 : * 8 Bytes.
121 : */
122 : #define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
123 :
124 : /**
 * The SSD does not support OPAL even though it sets the security bit in OACS.
126 : */
127 : #define NVME_QUIRK_OACS_SECURITY 0x2000
128 :
129 : /**
130 : * Intel P55XX SSDs can't support Dataset Management command with SGL format,
131 : * so use PRP with DSM command.
132 : */
133 : #define NVME_QUIRK_NO_SGL_FOR_DSM 0x4000
134 :
135 : /**
136 : * Maximum Data Transfer Size(MDTS) excludes interleaved metadata.
137 : */
138 : #define NVME_QUIRK_MDTS_EXCLUDE_MD 0x8000
139 :
140 : /**
 * Force not to use SGL even if the controller reports that it can
 * support it.
143 : */
144 : #define NVME_QUIRK_NOT_USE_SGL 0x10000
145 :
146 : /*
147 : * Some SSDs require the admin submission queue size to equate to an even
148 : * 4KiB multiple.
149 : */
150 : #define NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE 0x20000
151 :
152 : #define NVME_MAX_ASYNC_EVENTS (8)
153 :
154 : #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS (30)
155 :
156 : /* Maximum log page size to fetch for AERs. */
157 : #define NVME_MAX_AER_LOG_SIZE (4096)
158 :
159 : /*
160 : * NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
161 : * define specifies the maximum number of queues this driver will actually
162 : * try to configure, if available.
163 : */
164 : #define DEFAULT_MAX_IO_QUEUES (1024)
165 : #define DEFAULT_ADMIN_QUEUE_SIZE (32)
166 : #define DEFAULT_IO_QUEUE_SIZE (256)
167 : #define DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK (1024) /* Matches Linux kernel driver */
168 :
169 : #define DEFAULT_IO_QUEUE_REQUESTS (512)
170 :
171 : #define SPDK_NVME_DEFAULT_RETRY_COUNT (4)
172 :
173 : #define SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED (0)
174 : #define SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED
175 :
176 : #define SPDK_NVME_TRANSPORT_TOS_DISABLED (0)
177 :
178 : #define MIN_KEEP_ALIVE_TIMEOUT_IN_MS (10000)
179 :
180 : /* We want to fit submission and completion rings each in a single 2MB
181 : * hugepage to ensure physical address contiguity.
182 : */
183 : #define MAX_IO_QUEUE_ENTRIES (VALUE_2MB / spdk_max( \
184 : sizeof(struct spdk_nvme_cmd), \
185 : sizeof(struct spdk_nvme_cpl)))
186 :
187 : /* Default timeout for fabrics connect commands. */
188 : #ifdef DEBUG
189 : #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 0
190 : #else
191 : /* 500 millisecond timeout. */
192 : #define NVME_FABRIC_CONNECT_COMMAND_TIMEOUT 500000
193 : #endif
194 :
195 : /* This value indicates that a read from a PCIe register is invalid. This can happen when a device is no longer present */
196 : #define SPDK_NVME_INVALID_REGISTER_VALUE 0xFFFFFFFFu
197 :
/** How the data payload of an nvme_request is described (see struct nvme_payload). */
enum nvme_payload_type {
	NVME_PAYLOAD_TYPE_INVALID = 0,

	/** nvme_request::u.payload.contig_buffer is valid for this request */
	NVME_PAYLOAD_TYPE_CONTIG,

	/** nvme_request::u.sgl is valid for this request */
	NVME_PAYLOAD_TYPE_SGL,
};
207 :
/** Boot partition write states (progress of a boot partition update) */
enum nvme_bp_write_state {
	/** Boot partition data transfer is in progress. */
	SPDK_NVME_BP_WS_DOWNLOADING = 0x0,
	/** Boot partition data transfer has completed. */
	SPDK_NVME_BP_WS_DOWNLOADED = 0x1,
	/** Replace the boot partition contents. */
	SPDK_NVME_BP_WS_REPLACE = 0x2,
	/** Activate the new boot partition. */
	SPDK_NVME_BP_WS_ACTIVATE = 0x3,
};
215 :
/**
 * Descriptor for a request data payload.
 *
 * A payload is either a single contiguous buffer or a scattered payload
 * described via SGL callbacks; nvme_payload_type() distinguishes the two
 * by whether reset_sgl_fn is set.
 */
struct nvme_payload {
	/**
	 * Functions for retrieving physical addresses for scattered payloads.
	 */
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn;

	/**
	 * Extended IO options passed by the user
	 */
	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
	/**
	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
	 * virtual memory address of a single virtually contiguous buffer.
	 *
	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
	 * cb_arg that will be passed to the SGL callback functions.
	 */
	void *contig_or_cb_arg;

	/** Virtual memory address of a single virtually contiguous metadata buffer */
	void *md;
};
242 :
/** Build a struct nvme_payload describing a single contiguous data buffer. */
#define NVME_PAYLOAD_CONTIG(contig_, md_) \
	(struct nvme_payload) { \
		.reset_sgl_fn = NULL, \
		.next_sge_fn = NULL, \
		.contig_or_cb_arg = (contig_), \
		.md = (md_), \
	}

/** Build a struct nvme_payload described by SGL callbacks (scattered payload). */
#define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
	(struct nvme_payload) { \
		.reset_sgl_fn = (reset_sgl_fn_), \
		.next_sge_fn = (next_sge_fn_), \
		.contig_or_cb_arg = (cb_arg_), \
		.md = (md_), \
	}
258 :
259 : static inline enum nvme_payload_type
260 149 : nvme_payload_type(const struct nvme_payload *payload) {
261 149 : return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
262 : }
263 :
/*
 * Describes an error to inject for a given admin/IO opcode; entries live on
 * spdk_nvme_qpair::err_cmd_head (see "Commands opcode in this list will
 * return error").
 */
struct nvme_error_cmd {
	/* If true, matching commands are completed with the error without being submitted. */
	bool do_not_submit;
	/* Tick value used to time error injection -- NOTE(review): exact semantics
	 * (expiry vs. delay) not visible here; confirm against the qpair code. */
	uint64_t timeout_tsc;
	/* Number of remaining times to inject the error -- presumably decremented
	 * per injection; verify against caller. */
	uint32_t err_count;
	/* Opcode that this injection entry matches. */
	uint8_t opc;
	/* Completion status to report for matching commands. */
	struct spdk_nvme_status status;
	TAILQ_ENTRY(nvme_error_cmd) link;
};
272 :
/*
 * Internal representation of a single NVMe command in flight, including its
 * payload descriptor, completion callback, and (for split I/O) parent/child
 * links.
 */
struct nvme_request {
	/** The NVMe command for this request. */
	struct spdk_nvme_cmd cmd;

	/** Retry counter (see SPDK_NVME_DEFAULT_RETRY_COUNT). */
	uint8_t retries;

	/** True if this request has been marked as timed out. */
	uint8_t timed_out : 1;

	/**
	 * True if the request is in the queued_req list.
	 */
	uint8_t queued : 1;
	uint8_t reserved : 6;

	/**
	 * Number of children requests still outstanding for this
	 *  request which was split into multiple child requests.
	 */
	uint16_t num_children;

	/**
	 * Offset in bytes from the beginning of payload for this request.
	 * This is used for I/O commands that are split into multiple requests.
	 */
	uint32_t payload_offset;
	uint32_t md_offset;

	/** Size of the data payload in bytes. */
	uint32_t payload_size;

	/**
	 * Timeout ticks for error injection requests, can be extended in future
	 * to support per-request timeout feature.
	 */
	uint64_t timeout_tsc;

	/**
	 * Data payload for this request's command.
	 */
	struct nvme_payload payload;

	/** Completion callback and its argument. */
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	STAILQ_ENTRY(nvme_request) stailq;

	/** Queue pair this request belongs to. */
	struct spdk_nvme_qpair *qpair;

	/*
	 * The value of spdk_get_ticks() when the request was submitted to the hardware.
	 * Only set if ctrlr->timeout_enabled is true.
	 */
	uint64_t submit_tick;

	/**
	 * The active admin request can be moved to a per process pending
	 * list based on the saved pid to tell which process it belongs
	 * to. The cpl saves the original completion information which
	 * is used in the completion callback.
	 * NOTE: these below two fields are only used for admin request.
	 */
	pid_t pid;
	struct spdk_nvme_cpl cpl;

	/** Size of the metadata buffer in bytes. */
	uint32_t md_size;

	/**
	 * The following members should not be reordered with members
	 * above. These members are only needed when splitting
	 * requests which is done rarely, and the driver is careful
	 * to not touch the following fields until a split operation is
	 * needed, to avoid touching an extra cacheline.
	 */

	/**
	 * Points to the outstanding child requests for a parent request.
	 * Only valid if a request was split into multiple children
	 * requests, and is not initialized for non-split requests.
	 */
	TAILQ_HEAD(, nvme_request) children;

	/**
	 * Linked-list pointers for a child request in its parent's list.
	 */
	TAILQ_ENTRY(nvme_request) child_tailq;

	/**
	 * Points to a parent request if part of a split request,
	 * NULL otherwise.
	 */
	struct nvme_request *parent;

	/**
	 * Completion status for a parent request. Initialized to all 0's
	 * (SUCCESS) before child requests are submitted. If a child
	 * request completes with error, the error status is copied here,
	 * to ensure that the parent request is also completed with error
	 * status once all child requests are completed.
	 */
	struct spdk_nvme_cpl parent_status;

	/**
	 * The user_cb_fn and user_cb_arg fields are used for holding the original
	 * callback data when using nvme_allocate_request_user_copy.
	 */
	spdk_nvme_cmd_cb user_cb_fn;
	void *user_cb_arg;
	void *user_buffer;

	/** Sequence of accel operations associated with this request */
	void *accel_sequence;
};
382 :
/*
 * Tracks a command whose completion is awaited by synchronous polling
 * (see spdk_nvme_qpair::poll_status).
 */
struct nvme_completion_poll_status {
	/* Completion entry copied when the command completes. */
	struct spdk_nvme_cpl cpl;
	/* Tick value at which polling gives up on the command. */
	uint64_t timeout_tsc;
	/**
	 * DMA buffer retained throughout the duration of the command. It'll be released
	 * automatically if the command times out, otherwise the user is responsible for freeing it.
	 */
	void *dma_data;
	/* Set when the command has completed. */
	bool done;
	/* This flag indicates that the request has been timed out and the memory
	   must be freed in a completion callback */
	bool timed_out;
};
396 :
/* Tracks one outstanding Asynchronous Event Request (see spdk_nvme_ctrlr::aer). */
struct nvme_async_event_request {
	/* Controller the AER was submitted to. */
	struct spdk_nvme_ctrlr *ctrlr;
	/* The underlying admin request for this AER. */
	struct nvme_request *req;
	/* Completion entry reported for the event. */
	struct spdk_nvme_cpl cpl;
};
402 :
/** Connection/enable state of a queue pair (stored in spdk_nvme_qpair::state). */
enum nvme_qpair_state {
	NVME_QPAIR_DISCONNECTED,
	NVME_QPAIR_DISCONNECTING,
	NVME_QPAIR_CONNECTING,
	NVME_QPAIR_CONNECTED,
	NVME_QPAIR_ENABLING,
	NVME_QPAIR_ENABLED,
	NVME_QPAIR_DESTROYING,
};
412 :
/**
 * In-band authentication state machine for a qpair (see struct nvme_auth).
 * The AWAIT_* states wait for the corresponding message exchange to complete.
 */
enum nvme_qpair_auth_state {
	NVME_QPAIR_AUTH_STATE_NEGOTIATE,
	NVME_QPAIR_AUTH_STATE_AWAIT_NEGOTIATE,
	NVME_QPAIR_AUTH_STATE_AWAIT_CHALLENGE,
	NVME_QPAIR_AUTH_STATE_AWAIT_REPLY,
	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS1,
	NVME_QPAIR_AUTH_STATE_AWAIT_SUCCESS2,
	NVME_QPAIR_AUTH_STATE_AWAIT_FAILURE2,
	NVME_QPAIR_AUTH_STATE_DONE,
};
423 :
424 : /* Authentication transaction required (authreq.atr) */
425 : #define NVME_QPAIR_AUTH_FLAG_ATR (1 << 0)
426 : /* Authentication and secure channel required (authreq.ascr) */
427 : #define NVME_QPAIR_AUTH_FLAG_ASCR (1 << 1)
428 :
429 : /* Maximum size of a digest */
430 : #define NVME_AUTH_DIGEST_MAX_SIZE 64
431 :
/** Per-qpair in-band authentication context (see spdk_nvme_qpair::auth). */
struct nvme_auth {
	/* State of the authentication */
	enum nvme_qpair_auth_state state;
	/* Status of the authentication */
	int status;
	/* Transaction ID */
	uint16_t tid;
	/* Flags (NVME_QPAIR_AUTH_FLAG_*) */
	uint32_t flags;
	/* Selected hash function */
	uint8_t hash;
	/* Buffer used for controller challenge */
	uint8_t challenge[NVME_AUTH_DIGEST_MAX_SIZE];
	/* User's auth cb fn/ctx */
	spdk_nvme_authenticate_cb cb_fn;
	void *cb_ctx;
};
449 :
/*
 * An NVMe submission/completion queue pair. Hot, I/O-path members are kept
 * near the top; everything after the "Entries below here" marker is cold.
 */
struct spdk_nvme_qpair {
	/** Controller that owns this qpair. */
	struct spdk_nvme_ctrlr *ctrlr;

	/** Queue identifier. */
	uint16_t id;

	/* Queue priority -- presumably the SQ priority class; confirm against transport code. */
	uint8_t qprio: 2;

	/* Qpair state; values from enum nvme_qpair_state. */
	uint8_t state: 3;

	/* True if the qpair operates in asynchronous mode. */
	uint8_t async: 1;

	uint8_t is_new_qpair: 1;

	/* NOTE(review): looks like the DNR (do not retry) bit to set when aborting
	 * requests -- confirm against abort path. */
	uint8_t abort_dnr: 1;
	/*
	 * Members for handling IO qpair deletion inside of a completion context.
	 * These are specifically defined as single bits, so that they do not
	 * push this data structure out to another cacheline.
	 */
	uint8_t in_completion_context: 1;
	uint8_t delete_after_completion_context: 1;

	/*
	 * Set when no deletion notification is needed. For example, the process
	 * which allocated this qpair exited unexpectedly.
	 */
	uint8_t no_deletion_notification_needed: 1;

	/* Fuse bits of the most recently submitted command -- used to track
	 * fused command pairs; TODO confirm. */
	uint8_t last_fuse: 2;

	uint8_t transport_failure_reason: 3;
	uint8_t last_transport_failure_reason: 3;

	/* The user is destroying qpair */
	uint8_t destroy_in_progress: 1;

	/* Number of IO outstanding at transport level */
	uint16_t queue_depth;

	/** Transport type of this qpair. */
	enum spdk_nvme_transport_type trtype;

	/** Number of requests currently outstanding on this qpair. */
	uint32_t num_outstanding_reqs;

	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
	struct nvme_request *reserved_req;

	/** Pool of free request objects. */
	STAILQ_HEAD(, nvme_request) free_req;
	/** Requests queued for later submission (see nvme_request::queued). */
	STAILQ_HEAD(, nvme_request) queued_req;

	/* List entry for spdk_nvme_transport_poll_group::qpairs */
	STAILQ_ENTRY(spdk_nvme_qpair) poll_group_stailq;

	/** Commands opcode in this list will return error */
	TAILQ_HEAD(, nvme_error_cmd) err_cmd_head;
	/** Requests in this list will return error */
	STAILQ_HEAD(, nvme_request) err_req_head;

	/** Process that allocated this qpair. */
	struct spdk_nvme_ctrlr_process *active_proc;

	struct spdk_nvme_transport_poll_group *poll_group;

	void *poll_group_tailq_head;

	const struct spdk_nvme_transport *transport;

	/* Entries below here are not touched in the main I/O path. */

	struct nvme_completion_poll_status *poll_status;

	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;

	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
	TAILQ_ENTRY(spdk_nvme_qpair) per_process_tailq;

	/* Queued requests that are being aborted -- NOTE(review): presumed; confirm. */
	STAILQ_HEAD(, nvme_request) aborting_queued_req;

	/* Backing memory for this qpair's request objects -- presumably; verify allocator. */
	void *req_buf;

	/* In-band authentication state */
	struct nvme_auth auth;
};
532 :
/** A set of per-transport poll groups polled together (see tgroups list). */
struct spdk_nvme_poll_group {
	/* User context associated with the group. */
	void *ctx;
	/* Acceleration function table for offloaded operations. */
	struct spdk_nvme_accel_fn_table accel_fn_table;
	/* Per-transport poll groups belonging to this group. */
	STAILQ_HEAD(, spdk_nvme_transport_poll_group) tgroups;
	/* Guards against re-entrant completion processing. */
	bool in_process_completions;
};
539 :
/** Per-transport member of an spdk_nvme_poll_group, tracking its qpairs. */
struct spdk_nvme_transport_poll_group {
	/* Owning poll group. */
	struct spdk_nvme_poll_group *group;
	/* Transport this poll group serves. */
	const struct spdk_nvme_transport *transport;
	/* Qpairs currently connected. */
	STAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	/* Qpairs currently disconnected. */
	STAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
	/* List entry for spdk_nvme_poll_group::tgroups. */
	STAILQ_ENTRY(spdk_nvme_transport_poll_group) link;
	/* Count of entries in connected_qpairs. */
	uint32_t num_connected_qpairs;
};
548 :
/* Driver-internal representation of a namespace on a controller. */
struct spdk_nvme_ns {
	/** Controller this namespace belongs to. */
	struct spdk_nvme_ctrlr *ctrlr;
	/** Logical block (sector) size in bytes. */
	uint32_t sector_size;

	/*
	 * Size of data transferred as part of each block,
	 * including metadata if FLBAS indicates the metadata is transferred
	 * as part of the data buffer at the end of each LBA.
	 */
	uint32_t extended_lba_size;

	/** Per-block metadata size in bytes. */
	uint32_t md_size;
	/** End-to-end protection information type. */
	uint32_t pi_type;
	uint32_t pi_format;
	/** Maximum I/O size in sectors. */
	uint32_t sectors_per_max_io;
	uint32_t sectors_per_max_io_no_md;
	/** Optimal I/O boundary in sectors (see NVME_INTEL_QUIRK_STRIPING). */
	uint32_t sectors_per_stripe;
	/** Namespace ID. */
	uint32_t id;
	uint16_t flags;
	/** True if this namespace is on the controller's active namespace list. */
	bool active;

	/* Command Set Identifier */
	enum spdk_nvme_csi csi;

	/* Namespace Identification Descriptor List (CNS = 03h) */
	uint8_t id_desc_list[4096];

	/* Asymmetric namespace access (ANA) group and state. */
	uint32_t ana_group_id;
	enum spdk_nvme_ana_state ana_state;

	/* Identify Namespace data. */
	struct spdk_nvme_ns_data nsdata;

	/* Zoned Namespace Command Set Specific Identify Namespace data. */
	struct spdk_nvme_zns_ns_data *nsdata_zns;

	struct spdk_nvme_nvm_ns_data *nsdata_nvm;

	/* Entry in spdk_nvme_ctrlr::ns tree. */
	RB_ENTRY(spdk_nvme_ns) node;
};
589 :
590 : /**
591 : * State of struct spdk_nvme_ctrlr (in particular, during initialization).
592 : */
enum nvme_ctrlr_state {
	/**
	 * Wait before initializing the controller.
	 */
	NVME_CTRLR_STATE_INIT_DELAY,

	/**
	 * Connect the admin queue.
	 */
	NVME_CTRLR_STATE_CONNECT_ADMINQ,

	/**
	 * Controller initialization has not started yet.
	 */
	NVME_CTRLR_STATE_INIT = NVME_CTRLR_STATE_CONNECT_ADMINQ,

	/**
	 * Waiting for admin queue to connect.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,

	/**
	 * Read Version (VS) register.
	 */
	NVME_CTRLR_STATE_READ_VS,

	/**
	 * Waiting for Version (VS) register to be read.
	 */
	NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS,

	/**
	 * Read Capabilities (CAP) register.
	 */
	NVME_CTRLR_STATE_READ_CAP,

	/**
	 * Waiting for Capabilities (CAP) register to be read.
	 */
	NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP,

	/**
	 * Check EN to prepare for controller initialization.
	 */
	NVME_CTRLR_STATE_CHECK_EN,

	/**
	 * Waiting for CC to be read as part of EN check.
	 */
	NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC,

	/**
	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,

	/**
	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,

	/**
	 * Disabling the controller by setting CC.EN to 0.
	 */
	NVME_CTRLR_STATE_SET_EN_0,

	/**
	 * Waiting for the CC register to be read as part of disabling the controller.
	 */
	NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,

	/**
	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,

	/**
	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 0.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,

	/**
	 * The controller is disabled. (CC.EN and CSTS.RDY are 0.)
	 */
	NVME_CTRLR_STATE_DISABLED,

	/**
	 * Enable the controller by writing CC.EN to 1
	 */
	NVME_CTRLR_STATE_ENABLE,

	/**
	 * Waiting for CC register to be written as part of enabling the controller.
	 */
	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC,

	/**
	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
	 */
	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,

	/**
	 * Waiting for CSTS register to be read as part of waiting for CSTS.RDY = 1.
	 */
	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,

	/**
	 * Reset the Admin queue of the controller.
	 */
	NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,

	/**
	 * Identify Controller command will be sent to the controller.
	 */
	NVME_CTRLR_STATE_IDENTIFY,

	/**
	 * Waiting for Identify Controller command to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,

	/**
	 * Configure AER of the controller.
	 */
	NVME_CTRLR_STATE_CONFIGURE_AER,

	/**
	 * Waiting for the Configure AER to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,

	/**
	 * Set Keep Alive Timeout of the controller.
	 */
	NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,

	/**
	 * Waiting for Set Keep Alive Timeout to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,

	/**
	 * Get Identify I/O Command Set Specific Controller data structure.
	 */
	NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,

	/**
	 * Waiting for Identify I/O Command Set Specific Controller command to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,

	/**
	 * Get Commands Supported and Effects log page for the Zoned Namespace Command Set.
	 */
	NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,

	/**
	 * Waiting for the Get Log Page command to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,

	/**
	 * Set Number of Queues of the controller.
	 */
	NVME_CTRLR_STATE_SET_NUM_QUEUES,

	/**
	 * Waiting for Set Num of Queues command to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,

	/**
	 * Get active Namespace list of the controller.
	 */
	NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,

	/**
	 * Waiting for the Identify Active Namespace commands to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,

	/**
	 * Get Identify Namespace Data structure for each NS.
	 */
	NVME_CTRLR_STATE_IDENTIFY_NS,

	/**
	 * Waiting for the Identify Namespace commands to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,

	/**
	 * Get Identify Namespace Identification Descriptors.
	 */
	NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,

	/**
	 * Get Identify I/O Command Set Specific Namespace data structure for each NS.
	 */
	NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,

	/**
	 * Waiting for the Identify I/O Command Set Specific Namespace commands to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,

	/**
	 * Waiting for the Identify Namespace Identification
	 * Descriptors to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,

	/**
	 * Set supported log pages of the controller.
	 */
	NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,

	/**
	 * Set supported log pages of INTEL controller.
	 */
	NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,

	/**
	 * Waiting for supported log pages of INTEL controller.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,

	/**
	 * Set supported features of the controller.
	 */
	NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,

	/**
	 * Set the Host Behavior Support feature of the controller.
	 */
	NVME_CTRLR_STATE_SET_HOST_FEATURE,

	/**
	 * Waiting for the Host Behavior Support feature of the controller.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,

	/**
	 * Set Doorbell Buffer Config of the controller.
	 */
	NVME_CTRLR_STATE_SET_DB_BUF_CFG,

	/**
	 * Waiting for Doorbell Buffer Config to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,

	/**
	 * Set Host ID of the controller.
	 */
	NVME_CTRLR_STATE_SET_HOST_ID,

	/**
	 * Waiting for Set Host ID to be completed.
	 */
	NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,

	/**
	 * Let transport layer do its part of initialization.
	 */
	NVME_CTRLR_STATE_TRANSPORT_READY,

	/**
	 * Controller initialization has completed and the controller is ready.
	 */
	NVME_CTRLR_STATE_READY,

	/**
	 * Controller initialization has an error.
	 */
	NVME_CTRLR_STATE_ERROR,

	/**
	 * Admin qpair was disconnected, controller needs to be re-initialized
	 */
	NVME_CTRLR_STATE_DISCONNECTED,
};
875 :
/* Timeout sentinels: NVME_TIMEOUT_INFINITE means never time out;
 * NVME_TIMEOUT_KEEP_EXISTING means keep the currently configured timeout.
 */
#define NVME_TIMEOUT_INFINITE 0
#define NVME_TIMEOUT_KEEP_EXISTING UINT64_MAX
878 :
/* One queued AER completion, published per-process via
 * spdk_nvme_ctrlr_process::async_events. */
struct spdk_nvme_ctrlr_aer_completion_list {
	struct spdk_nvme_cpl cpl;
	STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
};
883 :
884 : /*
885 : * Used to track properties for all processes accessing the controller.
886 : */
struct spdk_nvme_ctrlr_process {
	/** Whether it is the primary process */
	bool is_primary;

	/** Process ID */
	pid_t pid;

	/** Active admin requests to be completed */
	STAILQ_HEAD(, nvme_request) active_reqs;

	/** List entry for spdk_nvme_ctrlr::active_procs. */
	TAILQ_ENTRY(spdk_nvme_ctrlr_process) tailq;

	/** Per process PCI device handle */
	struct spdk_pci_device *devhandle;

	/** Reference to track the number of attachment to this controller. */
	int ref;

	/** Allocated IO qpairs */
	TAILQ_HEAD(, spdk_nvme_qpair) allocated_io_qpairs;

	/** Async event callback registered by this process, with its argument. */
	spdk_nvme_aer_cb aer_cb_fn;
	void *aer_cb_arg;

	/**
	 * A function pointer to timeout callback function
	 */
	spdk_nvme_timeout_cb timeout_cb_fn;
	void *timeout_cb_arg;
	/** separate timeout values for io vs. admin reqs */
	uint64_t timeout_io_ticks;
	uint64_t timeout_admin_ticks;

	/** List to publish AENs to all procs in multiprocess setup */
	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list) async_events;
};
923 :
/* Records a completed asynchronous register read/write so its callback can be
 * invoked later (see spdk_nvme_ctrlr::register_operations). */
struct nvme_register_completion {
	/* Completion status of the register operation. */
	struct spdk_nvme_cpl cpl;
	/* Value read from (or written to) the register. */
	uint64_t value;
	/* User callback and context for this operation. */
	spdk_nvme_reg_cb cb_fn;
	void *cb_ctx;
	STAILQ_ENTRY(nvme_register_completion) stailq;
	/* Process that issued the operation. */
	pid_t pid;
};
932 :
/* Driver-internal representation of an NVMe controller. */
struct spdk_nvme_ctrlr {
	/* Hot data (accessed in I/O path) starts here. */

	/* Tree of namespaces */
	RB_HEAD(nvme_ns_tree, spdk_nvme_ns) ns;

	/* The number of active namespaces */
	uint32_t active_ns_count;

	bool is_removed;

	bool is_resetting;

	bool is_failed;

	bool is_destructed;

	/* True if request timeout tracking is enabled (see nvme_request::submit_tick). */
	bool timeout_enabled;

	/* The application is preparing to reset the controller. Transports
	 * can use this to skip unnecessary parts of the qpair deletion process
	 * for example, like the DELETE_SQ/CQ commands.
	 */
	bool prepare_for_reset;

	bool is_disconnecting;

	bool needs_io_msg_update;

	/* Maximum number of SGL elements per request -- NOTE(review): presumed
	 * from the name; confirm against transport code. */
	uint16_t max_sges;

	/* Controller ID (CNTLID). */
	uint16_t cntlid;

	/** Controller support flags */
	uint64_t flags;

	/** NVMEoF in-capsule data size in bytes */
	uint32_t ioccsz_bytes;

	/** NVMEoF in-capsule data offset in 16 byte units */
	uint16_t icdoff;

	/* Cold data (not accessed in normal I/O path) is after this point. */

	struct spdk_nvme_transport_id trid;

	struct {
		/** Is numa.id valid? Ensures numa.id == 0 is interpreted correctly. */
		uint32_t id_valid : 1;
		int32_t id : 31;
	} numa;

	/* Cached Capabilities and Version registers. */
	union spdk_nvme_cap_register cap;
	union spdk_nvme_vs_register vs;

	/* Current state of the initialization state machine (enum nvme_ctrlr_state)
	 * and its deadline. */
	int state;
	uint64_t state_timeout_tsc;

	uint64_t next_keep_alive_tick;
	uint64_t keep_alive_interval_ticks;

	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;

	/** All the log pages supported */
	bool log_page_supported[256];

	/** All the features supported */
	bool feature_supported[256];

	/** maximum i/o size in bytes */
	uint32_t max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t min_page_size;

	/** selected memory page size for this controller in bytes */
	uint32_t page_size;

	/* Outstanding asynchronous event requests. */
	uint32_t num_aers;
	struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];

	/** guards access to the controller itself, including admin queues */
	pthread_mutex_t ctrlr_lock;

	struct spdk_nvme_qpair *adminq;

	/** shadow doorbell buffer */
	uint32_t *shadow_doorbell;
	/** eventidx buffer */
	uint32_t *eventidx;

	/**
	 * Identify Controller data.
	 */
	struct spdk_nvme_ctrlr_data cdata;

	/**
	 * Zoned Namespace Command Set Specific Identify Controller data.
	 */
	struct spdk_nvme_zns_ctrlr_data *cdata_zns;

	/* Pool of unused I/O queue IDs. */
	struct spdk_bit_array *free_io_qids;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;

	struct spdk_nvme_ctrlr_opts opts;

	/* Bitmask of NVME_QUIRK_* / NVME_INTEL_QUIRK_* flags for this device. */
	uint64_t quirks;

	/* Extra sleep time during controller initialization */
	uint64_t sleep_timeout_tsc;

	/** Track all the processes manage this controller */
	TAILQ_HEAD(, spdk_nvme_ctrlr_process) active_procs;


	/* Abort requests waiting to be submitted, and the count outstanding. */
	STAILQ_HEAD(, nvme_request) queued_aborts;
	uint32_t outstanding_aborts;

	uint32_t lock_depth;

	/* CB to notify the user when the ctrlr is removed/failed. */
	spdk_nvme_remove_cb remove_cb;
	void *cb_ctx;

	/* Qpair, lock and ring used for cross-thread/process I/O message passing. */
	struct spdk_nvme_qpair *external_io_msgs_qpair;
	pthread_mutex_t external_io_msgs_lock;
	struct spdk_ring *external_io_msgs;

	STAILQ_HEAD(, nvme_io_msg_producer) io_producers;

	/* ANA (asymmetric namespace access) log page state. */
	struct spdk_nvme_ana_page *ana_log_page;
	struct spdk_nvme_ana_group_descriptor *copied_ana_desc;
	uint32_t ana_log_page_size;

	/* scratchpad pointer that can be used to send data between two NVME_CTRLR_STATEs */
	void *tmp_ptr;

	/* maximum zone append size in bytes */
	uint32_t max_zone_append_size;

	/* PMR size in bytes */
	uint64_t pmr_size;

	/* Boot Partition Info */
	enum nvme_bp_write_state bp_ws;
	uint32_t bpid;
	spdk_nvme_cmd_cb bp_write_cb_fn;
	void *bp_write_cb_arg;

	/* Firmware Download */
	void *fw_payload;
	unsigned int fw_size_remaining;
	unsigned int fw_offset;
	unsigned int fw_transfer_size;

	/* Completed register operations */
	STAILQ_HEAD(, nvme_register_completion) register_operations;

	union spdk_nvme_cc_register process_init_cc;

	/* Authentication transaction ID */
	uint16_t auth_tid;
	/* Authentication sequence number */
	uint32_t auth_seqnum;
};
1098 :
/* Tracks one probe/attach cycle: the target transport address, the user's
 * callbacks, and the controllers still completing initialization. */
struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;		/* transport address being probed */
	const struct spdk_nvme_ctrlr_opts *opts;	/* controller options requested by the caller */
	void *cb_ctx;					/* opaque context passed to the callbacks below */
	spdk_nvme_probe_cb probe_cb;			/* called for each discovered controller */
	spdk_nvme_attach_cb attach_cb;			/* called when a controller finishes attaching */
	spdk_nvme_attach_fail_cb attach_fail_cb;	/* called when an attach attempt fails */
	spdk_nvme_remove_cb remove_cb;			/* called on controller removal */
	TAILQ_HEAD(, spdk_nvme_ctrlr) init_ctrlrs;	/* controllers still initializing */
};
1109 :
/* Callback invoked when an asynchronous controller detach completes. */
typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);

/* States of the asynchronous controller detach/shutdown state machine. */
enum nvme_ctrlr_detach_state {
	NVME_CTRLR_DETACH_SET_CC,	/* write CC to request shutdown */
	NVME_CTRLR_DETACH_CHECK_CSTS,	/* decide whether a CSTS read is needed */
	NVME_CTRLR_DETACH_GET_CSTS,	/* CSTS read in flight */
	NVME_CTRLR_DETACH_GET_CSTS_DONE,	/* CSTS read complete; evaluate shutdown status */
};
1118 :
/* Per-controller context for an asynchronous detach operation. */
struct nvme_ctrlr_detach_ctx {
	struct spdk_nvme_ctrlr *ctrlr;			/* controller being detached */
	nvme_ctrlr_detach_cb cb_fn;			/* invoked when destruct finishes */
	uint64_t shutdown_start_tsc;			/* tick when shutdown was initiated */
	uint32_t shutdown_timeout_ms;			/* how long to wait for shutdown completion */
	bool shutdown_complete;				/* controller reported shutdown done */
	enum nvme_ctrlr_detach_state state;		/* current state-machine state */
	union spdk_nvme_csts_register csts;		/* last CSTS value read */
	TAILQ_ENTRY(nvme_ctrlr_detach_ctx) link;	/* entry in spdk_nvme_detach_ctx.head */
};
1129 :
/* Aggregates per-controller detach contexts for a multi-controller
 * asynchronous detach operation. */
struct spdk_nvme_detach_ctx {
	TAILQ_HEAD(, nvme_ctrlr_detach_ctx) head;
};
1133 :
/* Global NVMe driver state (see g_spdk_nvme_driver); shared across
 * processes per the multi-process comments below. */
struct nvme_driver {
	pthread_mutex_t lock;	/* guards the shared controller list */

	/** Multi-process shared attached controller list */
	TAILQ_HEAD(, spdk_nvme_ctrlr) shared_attached_ctrlrs;

	bool initialized;	/* set once driver initialization completes */
	struct spdk_uuid default_extended_host_id;	/* host ID used when caller supplies none */

	/** netlink socket fd for hotplug messages */
	int hotplug_fd;
};
1146 :
/* Read (opts)->field if opts is non-NULL and the caller's opts->size shows
 * the structure extends through that field; otherwise yield defval.
 * NOTE: evaluates `opts` more than once - do not pass expressions with
 * side effects. */
#define nvme_ns_cmd_get_ext_io_opt(opts, field, defval) \
	((opts) != NULL && offsetof(struct spdk_nvme_ns_cmd_ext_io_opts, field) + \
	sizeof((opts)->field) <= (opts)->size ? (opts)->field : (defval))
1150 :
1151 : extern struct nvme_driver *g_spdk_nvme_driver;
1152 :
1153 : int nvme_driver_init(void);
1154 :
/* Sleep-based delay helper, argument in microseconds. */
#define nvme_delay usleep
1156 :
1157 : static inline bool
1158 70 : nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
1159 : {
1160 70 : return qpair->id == 0;
1161 : }
1162 :
1163 : static inline bool
1164 : nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
1165 : {
1166 : return qpair->id != 0;
1167 : }
1168 :
/*
 * Lock a (possibly robust, process-shared) mutex. If the previous owner
 * died while holding it (EOWNERDEAD), mark the mutex consistent again so
 * it stays usable; the lock is held on return either way when rc == 0.
 */
static inline int
nvme_robust_mutex_lock(pthread_mutex_t *mtx)
{
	int rc;

	rc = pthread_mutex_lock(mtx);
#ifndef __FreeBSD__
	if (rc == EOWNERDEAD) {
		/* Recover the state left behind by the dead owner. */
		rc = pthread_mutex_consistent(mtx);
	}
#endif

	return rc;
}
1182 :
1183 : static inline int
1184 12609 : nvme_ctrlr_lock(struct spdk_nvme_ctrlr *ctrlr)
1185 : {
1186 : int rc;
1187 :
1188 12609 : rc = nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
1189 12609 : ctrlr->lock_depth++;
1190 12609 : return rc;
1191 : }
1192 :
/* Release a mutex taken via nvme_robust_mutex_lock(). */
static inline int
nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
{
	int rc = pthread_mutex_unlock(mtx);

	return rc;
}
1198 :
1199 : static inline int
1200 12607 : nvme_ctrlr_unlock(struct spdk_nvme_ctrlr *ctrlr)
1201 : {
1202 12607 : ctrlr->lock_depth--;
1203 12607 : return nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1204 : }
1205 :
1206 : /* Poll group management functions. */
1207 : int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1208 : int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1209 :
1210 : /* Admin functions */
1211 : int nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
1212 : uint8_t cns, uint16_t cntid, uint32_t nsid,
1213 : uint8_t csi, void *payload, size_t payload_size,
1214 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1215 : int nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1216 : uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
1217 : void *cb_arg);
1218 : int nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
1219 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1220 : int nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
1221 : union spdk_nvme_feat_async_event_configuration config,
1222 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1223 : int nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
1224 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1225 : int nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1226 : struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1227 : int nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1228 : struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1229 : int nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
1230 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1231 : int nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr,
1232 : uint64_t prp1, uint64_t prp2,
1233 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1234 : int nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
1235 : void *cb_arg);
1236 : int nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1237 : struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1238 : int nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
1239 : const struct spdk_nvme_fw_commit *fw_commit,
1240 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1241 : int nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
1242 : uint32_t size, uint32_t offset, void *payload,
1243 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1244 : int nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1245 : struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
1246 : spdk_nvme_cmd_cb cb_fn, void *cb_arg);
1247 : void nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
1248 : int nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
1249 : struct nvme_completion_poll_status *status);
1250 : int nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
1251 : struct nvme_completion_poll_status *status,
1252 : pthread_mutex_t *robust_mutex);
1253 : int nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
1254 : struct nvme_completion_poll_status *status,
1255 : uint64_t timeout_in_usecs);
1256 : int nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
1257 : struct nvme_completion_poll_status *status,
1258 : pthread_mutex_t *robust_mutex,
1259 : uint64_t timeout_in_usecs);
1260 : int nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
1261 : struct nvme_completion_poll_status *status,
1262 : pthread_mutex_t *robust_mutex);
1263 :
1264 : struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
1265 : pid_t pid);
1266 : struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr);
1267 : int nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
1268 : void nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr);
1269 : struct spdk_pci_device *nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr);
1270 :
1271 : int nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
1272 : struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle);
1273 :
1274 : int nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
1275 : void nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr);
1276 : void nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1277 : void nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
1278 : struct nvme_ctrlr_detach_ctx *ctx);
1279 : int nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
1280 : struct nvme_ctrlr_detach_ctx *ctx);
1281 : void nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove);
1282 : int nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
1283 : void nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr);
1284 : int nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr);
1285 : void nvme_ctrlr_connected(struct spdk_nvme_probe_ctx *probe_ctx,
1286 : struct spdk_nvme_ctrlr *ctrlr);
1287 :
1288 : int nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
1289 : struct nvme_request *req);
1290 : int nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
1291 : int nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs);
1292 : int nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz);
1293 : int nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap);
1294 : int nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo);
1295 : int nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel);
1296 : int nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value);
1297 : bool nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr);
1298 : void nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1299 : void nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr);
1300 : int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
1301 : struct spdk_nvme_ctrlr *ctrlr,
1302 : enum spdk_nvme_qprio qprio,
1303 : uint32_t num_requests, bool async);
1304 : void nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
1305 : void nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
1306 : int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1307 : struct nvme_request *req);
1308 : void nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair);
1309 : uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
1310 : void nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair);
1311 : void nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
1312 : int nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
1313 : void nvme_ns_set_identify_data(struct spdk_nvme_ns *ns);
1314 : void nvme_ns_set_id_desc_list_data(struct spdk_nvme_ns *ns);
1315 : void nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns);
1316 : void nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns);
1317 : void nvme_ns_free_iocs_specific_data(struct spdk_nvme_ns *ns);
1318 : bool nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns);
1319 : int nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
1320 : struct spdk_nvme_ctrlr *ctrlr);
1321 : void nvme_ns_destruct(struct spdk_nvme_ns *ns);
1322 : int nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1323 : void *buffer, void *metadata, uint64_t zslba,
1324 : uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1325 : uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag);
1326 : int nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1327 : uint64_t zslba, uint32_t lba_count,
1328 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1329 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1330 : spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1331 : uint16_t apptag_mask, uint16_t apptag);
1332 :
1333 : int nvme_fabric_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1334 : int nvme_fabric_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1335 : int nvme_fabric_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1336 : int nvme_fabric_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1337 : int nvme_fabric_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1338 : uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1339 : int nvme_fabric_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1340 : uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1341 : int nvme_fabric_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1342 : spdk_nvme_reg_cb cb_fn, void *cb_arg);
1343 : int nvme_fabric_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1344 : spdk_nvme_reg_cb cb_fn, void *cb_arg);
1345 : int nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1346 : int nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
1347 : struct spdk_nvme_probe_ctx *probe_ctx);
1348 : int nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1349 : int nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entries);
1350 : int nvme_fabric_qpair_connect_poll(struct spdk_nvme_qpair *qpair);
1351 : bool nvme_fabric_qpair_auth_required(struct spdk_nvme_qpair *qpair);
1352 : int nvme_fabric_qpair_authenticate_async(struct spdk_nvme_qpair *qpair);
1353 : int nvme_fabric_qpair_authenticate_poll(struct spdk_nvme_qpair *qpair);
1354 :
1355 : typedef int (*spdk_nvme_parse_ana_log_page_cb)(
1356 : const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);
1357 : int nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
1358 : spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg);
1359 :
/* Zero only the hot, always-used leading fields of a request (everything
 * before payload_size in struct nvme_request). */
static inline void
nvme_request_clear(struct nvme_request *req)
{
	/*
	 * Only memset/zero fields that need it. All other fields
	 * will be initialized appropriately either later in this
	 * function, or before they are needed later in the
	 * submission path. For example, the children
	 * TAILQ_ENTRY and following members are
	 * only used as part of I/O splitting so we avoid
	 * memsetting them until it is actually needed.
	 * They will be initialized in nvme_request_add_child()
	 * if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, payload_size));
}
1376 :
/*
 * Initialize the common fields of a request just taken off a free list:
 * clear the hot fields via nvme_request_clear(), then fill in the
 * caller-supplied submission parameters.
 *
 * Fix: the do/while(0) wrapper must NOT end in a semicolon. The trailing
 * ';' turned `NVME_INIT_REQUEST(...);` into two statements, which breaks
 * the macro when used as the body of an unbraced if/else. Callers (e.g.
 * nvme_allocate_request) already supply their own ';'.
 */
#define NVME_INIT_REQUEST(req, _cb_fn, _cb_arg, _payload, _payload_size, _md_size)	\
	do {						\
		nvme_request_clear(req);		\
		req->cb_fn = _cb_fn;			\
		req->cb_arg = _cb_arg;			\
		req->payload = _payload;		\
		req->payload_size = _payload_size;	\
		req->md_size = _md_size;		\
		req->pid = g_spdk_nvme_pid;		\
		req->submit_tick = 0;			\
		req->accel_sequence = NULL;		\
	} while (0)
1389 :
/*
 * Pop a request from the qpair's free list and initialize it with the given
 * payload descriptor and completion callback. Returns NULL when the free
 * list is exhausted (queue full); the caller must handle that case.
 */
static inline struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size, uint32_t md_size,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = STAILQ_FIRST(&qpair->free_req);
	if (req == NULL) {
		/* No free requests - the qpair is saturated. */
		return req;
	}

	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
	qpair->num_outstanding_reqs++;

	NVME_INIT_REQUEST(req, cb_fn, cb_arg, *payload, payload_size, md_size);

	return req;
}
1409 :
1410 : static inline struct nvme_request *
1411 118 : nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
1412 : void *buffer, uint32_t payload_size,
1413 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1414 : {
1415 118 : struct nvme_payload payload;
1416 :
1417 118 : payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
1418 :
1419 118 : return nvme_allocate_request(qpair, &payload, payload_size, 0, cb_fn, cb_arg);
1420 : }
1421 :
1422 : static inline struct nvme_request *
1423 76 : nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1424 : {
1425 76 : return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
1426 : }
1427 :
1428 : struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
1429 : void *buffer, uint32_t payload_size,
1430 : spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
1431 :
1432 : static inline void
1433 159 : _nvme_free_request(struct nvme_request *req, struct spdk_nvme_qpair *qpair)
1434 : {
1435 159 : assert(req != NULL);
1436 159 : assert(req->num_children == 0);
1437 159 : assert(qpair != NULL);
1438 :
1439 : /* The reserved_req does not go in the free_req STAILQ - it is
1440 : * saved only for use with a FABRICS/CONNECT command.
1441 : */
1442 159 : if (spdk_likely(qpair->reserved_req != req)) {
1443 159 : STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
1444 :
1445 159 : assert(qpair->num_outstanding_reqs > 0);
1446 159 : qpair->num_outstanding_reqs--;
1447 : }
1448 159 : }
1449 :
1450 : static inline void
1451 143 : nvme_free_request(struct nvme_request *req)
1452 : {
1453 143 : _nvme_free_request(req, req->qpair);
1454 143 : }
1455 :
/*
 * Complete a request: abort any leftover accel sequence, optionally apply
 * an injected error status, return the request to the qpair's free list,
 * and finally invoke the caller's completion callback with the (possibly
 * rewritten) completion entry.
 */
static inline void
nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpair *qpair,
		      struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_cpl err_cpl;
	struct nvme_error_cmd *cmd;

	if (spdk_unlikely(req->accel_sequence != NULL)) {
		struct spdk_nvme_poll_group *pg = qpair->poll_group->group;

		/* Transports are required to execute the sequence and clear req->accel_sequence.
		 * If it's left non-NULL it must mean the request is failed. */
		assert(spdk_nvme_cpl_is_error(cpl));
		pg->accel_fn_table.abort_sequence(req->accel_sequence);
		req->accel_sequence = NULL;
	}

	/* error injection at completion path,
	 * only inject for successful completed commands
	 */
	if (spdk_unlikely(!TAILQ_EMPTY(&qpair->err_cmd_head) &&
			  !spdk_nvme_cpl_is_error(cpl))) {
		TAILQ_FOREACH(cmd, &qpair->err_cmd_head, link) {

			if (cmd->do_not_submit) {
				continue;
			}

			/* Inject on an opcode match while error budget remains;
			 * a local copy is used so the real cpl is untouched. */
			if ((cmd->opc == req->cmd.opc) && cmd->err_count) {

				err_cpl = *cpl;
				err_cpl.status.sct = cmd->status.sct;
				err_cpl.status.sc = cmd->status.sc;

				cpl = &err_cpl;
				cmd->err_count--;
				break;
			}
		}
	}

	/* For PCIe completions, we want to avoid touching the req itself to avoid
	 * dependencies on loading those cachelines. So call the internal helper
	 * function instead using the qpair that was passed by the caller, instead
	 * of getting it from the req.
	 */
	_nvme_free_request(req, qpair);

	if (spdk_likely(cb_fn)) {
		cb_fn(cb_arg, cpl);
	}
}
1508 :
1509 : static inline void
1510 6 : nvme_cleanup_user_req(struct nvme_request *req)
1511 : {
1512 6 : if (req->user_buffer && req->payload_size) {
1513 2 : spdk_free(req->payload.contig_or_cb_arg);
1514 2 : req->user_buffer = NULL;
1515 : }
1516 :
1517 6 : req->user_cb_arg = NULL;
1518 6 : req->user_cb_fn = NULL;
1519 6 : }
1520 :
1521 : static inline bool
1522 3 : nvme_request_abort_match(struct nvme_request *req, void *cmd_cb_arg)
1523 : {
1524 4 : return req->cb_arg == cmd_cb_arg ||
1525 4 : req->user_cb_arg == cmd_cb_arg ||
1526 1 : (req->parent != NULL && req->parent->cb_arg == cmd_cb_arg);
1527 : }
1528 :
1529 : static inline void
1530 42 : nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
1531 : {
1532 42 : qpair->state = state;
1533 42 : if (state == NVME_QPAIR_ENABLED) {
1534 24 : qpair->is_new_qpair = false;
1535 : }
1536 42 : }
1537 :
1538 : static inline enum nvme_qpair_state
1539 136 : nvme_qpair_get_state(struct spdk_nvme_qpair *qpair) {
1540 136 : return qpair->state;
1541 : }
1542 :
1543 : static inline void
1544 70 : nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
1545 : {
1546 70 : assert(parent != NULL);
1547 70 : assert(child != NULL);
1548 70 : assert(child->parent == parent);
1549 70 : assert(parent->num_children != 0);
1550 :
1551 70 : parent->num_children--;
1552 70 : child->parent = NULL;
1553 70 : TAILQ_REMOVE(&parent->children, child, child_tailq);
1554 70 : }
1555 :
/*
 * Completion callback used for the children of a split request. Detaches
 * the child from its parent, records any child error in the parent's
 * aggregate status, and completes the parent once its final child has
 * finished.
 */
static inline void
nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *child = child_arg;
	struct nvme_request *parent = child->parent;

	nvme_request_remove_child(parent, child);

	/* Any failing child overwrites the parent's aggregate status. */
	if (spdk_nvme_cpl_is_error(cpl)) {
		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
	}
}
1573 :
/*
 * Attach child to parent as part of I/O splitting. Lazily initializes the
 * parent's children list on first use and redirects the child's completion
 * callback to nvme_cb_complete_child() so the parent completes only after
 * all children do.
 */
static inline void
nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
{
	assert(parent->num_children != UINT16_MAX);

	if (parent->num_children == 0) {
		/*
		 * Defer initialization of the children TAILQ since it falls
		 * on a separate cacheline. This ensures we do not touch this
		 * cacheline except on request splitting cases, which are
		 * relatively rare.
		 */
		TAILQ_INIT(&parent->children);
		parent->parent = NULL;
		memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
	}

	parent->num_children++;
	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	child->parent = parent;
	child->cb_fn = nvme_cb_complete_child;
	child->cb_arg = child;
}
1597 :
1598 : static inline void
1599 69 : nvme_request_free_children(struct nvme_request *req)
1600 : {
1601 : struct nvme_request *child, *tmp;
1602 :
1603 69 : if (req->num_children == 0) {
1604 57 : return;
1605 : }
1606 :
1607 : /* free all child nvme_request */
1608 62 : TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
1609 50 : nvme_request_remove_child(req, child);
1610 50 : nvme_request_free_children(child);
1611 50 : nvme_free_request(child);
1612 : }
1613 : }
1614 :
1615 : int nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
1616 : struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick);
1617 : uint64_t nvme_get_quirks(const struct spdk_pci_id *id);
1618 :
1619 : int nvme_robust_mutex_init_shared(pthread_mutex_t *mtx);
1620 : int nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
1621 :
1622 : bool nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
1623 :
1624 : struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
1625 : const struct spdk_nvme_transport_id *trid, const char *hostnqn);
1626 :
1627 : const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);
1628 : const struct spdk_nvme_transport *nvme_get_first_transport(void);
1629 : const struct spdk_nvme_transport *nvme_get_next_transport(const struct spdk_nvme_transport
1630 : *transport);
1631 : void nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
1632 :
1633 : /* Transport specific functions */
1634 : struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1635 : const struct spdk_nvme_ctrlr_opts *opts,
1636 : void *devhandle);
1637 : int nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1638 : int nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx, bool direct_connect);
1639 : int nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx);
1640 : int nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
1641 : int nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr);
1642 : int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
1643 : int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
1644 : int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
1645 : int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
1646 : int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1647 : uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1648 : int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1649 : uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
1650 : int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1651 : spdk_nvme_reg_cb cb_fn, void *cb_arg);
1652 : int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
1653 : spdk_nvme_reg_cb cb_fn, void *cb_arg);
1654 : uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
1655 : uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
1656 : struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1657 : uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
1658 : int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
1659 : void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1660 : int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
1661 : int nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1662 : int nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr);
1663 : void *nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
1664 : int nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr);
1665 : void nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
1666 : struct spdk_nvme_qpair *qpair);
1667 : int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1668 : struct spdk_nvme_qpair *qpair);
1669 : void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
1670 : struct spdk_nvme_qpair *qpair);
1671 : void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
1672 : int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
1673 : struct spdk_memory_domain **domains, int array_size);
1674 : void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair);
1675 : int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
1676 : int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
1677 : int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1678 : uint32_t max_completions);
1679 : void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
1680 : int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
1681 : int (*iter_fn)(struct nvme_request *req, void *arg),
1682 : void *arg);
1683 : int nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair);
1684 :
1685 : struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
1686 : const struct spdk_nvme_transport *transport);
1687 : struct spdk_nvme_transport_poll_group *nvme_transport_qpair_get_optimal_poll_group(
1688 : const struct spdk_nvme_transport *transport,
1689 : struct spdk_nvme_qpair *qpair);
1690 : int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
1691 : struct spdk_nvme_qpair *qpair);
1692 : int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
1693 : struct spdk_nvme_qpair *qpair);
1694 : int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
1695 : int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
1696 : int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
1697 : uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
1698 : int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
1699 : int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
1700 : struct spdk_nvme_transport_poll_group_stat **stats);
1701 : void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
1702 : struct spdk_nvme_transport_poll_group_stat *stats);
1703 : enum spdk_nvme_transport_type nvme_transport_get_trtype(const struct spdk_nvme_transport
1704 : *transport);
1705 : /*
1706 : * Below ref related functions must be called with the global
1707 : * driver lock held for the multi-process condition.
1708 : * Within these functions, the per ctrlr ctrlr_lock is also
1709 : * acquired for the multi-thread condition.
1710 : */
1711 : void nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr);
1712 : void nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
1713 : int nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
1714 :
1715 : int nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
1716 : int nvme_parse_addr(struct sockaddr_storage *sa, int family,
1717 : const char *addr, const char *service, long int *port);
1718 : int nvme_get_default_hostnqn(char *buf, int len);
1719 :
/* True if address is a multiple of page_size (page_size must be a power of two). */
static inline bool
_is_page_aligned(uint64_t address, uint64_t page_size)
{
	uint64_t offset_mask = page_size - 1;

	return (address & offset_mask) == 0;
}
1725 :
1726 : #endif /* __NVME_INTERNAL_H__ */
|