/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
        struct spdk_nvme_transport_ops          ops;
        TAILQ_ENTRY(spdk_nvme_transport)        link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
        TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
        .rdma_srq_size = 0,
        .rdma_max_cq_size = 0,
        .rdma_cm_event_timeout_ms = 1000,
        .rdma_umr_per_io = false,
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
        return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
        return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that many admin-related transport calls have to call nvme_get_transport
 * in order to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
        struct spdk_nvme_transport *registered_transport;

        TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
                if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
                        return registered_transport;
                }
        }

        return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
        return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
        return nvme_get_transport(transport_name) != NULL;
}
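
/*
 * Usage sketch (illustrative, not part of the upstream file): an application
 * can check whether a transport was compiled in before trying to connect.
 * The transport name below is an assumption for the example.
 *
 *        if (!spdk_nvme_transport_available_by_name("RDMA")) {
 *                SPDK_ERRLOG("RDMA transport is not available\n");
 *                return -ENODEV;
 *        }
 */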

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
        struct spdk_nvme_transport *new_transport;

        if (nvme_get_transport(ops->name)) {
                SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
                assert(false);
                return;
        }

        if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
                SPDK_ERRLOG("Unable to register new NVMe transport.\n");
                assert(false);
                return;
        }
        new_transport = &g_transports[g_current_transport_index++];

        new_transport->ops = *ops;
        TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
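
/*
 * Usage sketch (illustrative; "my_transport" and its callbacks are
 * hypothetical): transport modules normally register themselves at load time
 * via the SPDK_NVME_TRANSPORT_REGISTER() constructor macro from spdk/nvme.h
 * instead of calling spdk_nvme_transport_register() directly:
 *
 *        static const struct spdk_nvme_transport_ops my_ops = {
 *                .name = "MY_TRANSPORT",
 *                .type = SPDK_NVME_TRANSPORT_CUSTOM,
 *                .ctrlr_construct = my_ctrlr_construct,
 *                // ... remaining mandatory callbacks ...
 *        };
 *        SPDK_NVME_TRANSPORT_REGISTER(my_transport, &my_ops);
 */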

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
                               const struct spdk_nvme_ctrlr_opts *opts,
                               void *devhandle)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
        struct spdk_nvme_ctrlr *ctrlr;

        if (transport == NULL) {
                SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
                return NULL;
        }

        ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

        return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
                          bool direct_connect)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

        if (transport == NULL) {
                SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
                return -ENOENT;
        }

        return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

        if (transport == NULL) {
                SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
                return -ENOENT;
        }

        if (transport->ops.ctrlr_scan_attached != NULL) {
                return transport->ops.ctrlr_scan_attached(probe_ctx);
        }
        SPDK_ERRLOG("Transport %s does not support ctrlr_scan_attached callback\n",
                    probe_ctx->trid.trstring);
        return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_enable_interrupts != NULL) {
                return transport->ops.ctrlr_enable_interrupts(ctrlr);
        }

        return -ENOTSUP;
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_ready) {
                return transport->ops.ctrlr_ready(ctrlr);
        }

        return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
                spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
        struct nvme_register_completion *ctx;

        ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
        if (ctx == NULL) {
                return -ENOMEM;
        }

        ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
        ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
        ctx->cb_fn = cb_fn;
        ctx->cb_ctx = cb_ctx;
        ctx->value = value;
        ctx->pid = getpid();

        nvme_ctrlr_lock(ctrlr);
        STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
        nvme_ctrlr_unlock(ctrlr);

        return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
                                     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        int rc;

        assert(transport != NULL);
        if (transport->ops.ctrlr_set_reg_4_async == NULL) {
                rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
                if (rc != 0) {
                        return rc;
                }

                return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
        }

        return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
                                     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        int rc;

        assert(transport != NULL);
        if (transport->ops.ctrlr_set_reg_8_async == NULL) {
                rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
                if (rc != 0) {
                        return rc;
                }

                return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
        }

        return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
                                     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        uint32_t value;
        int rc;

        assert(transport != NULL);
        if (transport->ops.ctrlr_get_reg_4_async == NULL) {
                rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
                if (rc != 0) {
                        return rc;
                }

                return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
        }

        return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
                                     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        uint64_t value;
        int rc;

        assert(transport != NULL);
        if (transport->ops.ctrlr_get_reg_8_async == NULL) {
                rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
                if (rc != 0) {
                        return rc;
                }

                return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
        }

        return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_reserve_cmb != NULL) {
                return transport->ops.ctrlr_reserve_cmb(ctrlr);
        }

        return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_map_cmb != NULL) {
                return transport->ops.ctrlr_map_cmb(ctrlr, size);
        }

        return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_unmap_cmb != NULL) {
                return transport->ops.ctrlr_unmap_cmb(ctrlr);
        }

        return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_enable_pmr != NULL) {
                return transport->ops.ctrlr_enable_pmr(ctrlr);
        }

        return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_disable_pmr != NULL) {
                return transport->ops.ctrlr_disable_pmr(ctrlr);
        }

        return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_map_pmr != NULL) {
                return transport->ops.ctrlr_map_pmr(ctrlr, size);
        }

        return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_unmap_pmr != NULL) {
                return transport->ops.ctrlr_unmap_pmr(ctrlr);
        }

        return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
                                     const struct spdk_nvme_io_qpair_opts *opts)
{
        struct spdk_nvme_qpair *qpair;
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
        if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
                qpair->transport = transport;
        }

        return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        int rc;

        assert(transport != NULL);

        /* Do not rely on qpair->transport. For multi-process cases, a foreign process may delete
         * the IO qpair, in which case the transport object would be invalid (each process has its
         * own unique transport objects since they contain function pointers). So we look up the
         * transport object in the delete_io_qpair case.
         */
        rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
        if (rc != 0) {
                SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
                            transport->ops.name);
                assert(false);
        }
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
        struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

        /* If the qpair was unable to reconnect, restore the original failure reason */
        qpair->transport_failure_reason = qpair->last_transport_failure_reason;
        nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
        int rc;

        assert(transport != NULL);
        if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
                qpair->transport = transport;
        }

        qpair->last_transport_failure_reason = qpair->transport_failure_reason;
        qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

        nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
        rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
        if (rc != 0) {
                goto err;
        }

        if (qpair->poll_group) {
                rc = nvme_poll_group_connect_qpair(qpair);
                if (rc) {
                        goto err;
                }
        }

        if (!qpair->async) {
                /* Busy wait until the qpair exits the connecting state */
                while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
                        if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
                                rc = spdk_nvme_poll_group_process_completions(
                                             qpair->poll_group->group, 0,
                                             nvme_transport_connect_qpair_fail);
                        } else {
                                rc = spdk_nvme_qpair_process_completions(qpair, 0);
                        }

                        if (rc < 0) {
                                goto err;
                        }
                }
        }

        return 0;
err:
        nvme_transport_connect_qpair_fail(qpair, NULL);
        if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
                assert(qpair->async == true);
                /* Let the caller poll the qpair until it is actually disconnected. */
                return 0;
        }

        return rc;
}
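
/*
 * Usage sketch (illustrative, variable names hypothetical): for the
 * non-blocking path above, an application opts in with async_mode and then
 * keeps polling so the CONNECTING state machine can make progress:
 *
 *        struct spdk_nvme_io_qpair_opts opts;
 *        struct spdk_nvme_qpair *qpair;
 *
 *        spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *        opts.async_mode = true;
 *        qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 *        // Poll until submitted I/O starts completing; requests issued while
 *        // the qpair is still connecting are queued internally.
 *        spdk_nvme_qpair_process_completions(qpair, 0);
 */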

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
            nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
                return;
        }

        nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
        assert(transport != NULL);

        if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
                nvme_poll_group_disconnect_qpair(qpair);
        }

        transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_qpair_get_fd(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
                            struct spdk_event_handler_opts *opts)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.qpair_get_fd != NULL) {
                return transport->ops.qpair_get_fd(qpair, opts);
        }

        return -ENOTSUP;
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
        if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
            nvme_qpair_is_admin_queue(qpair)) {
                nvme_qpair_abort_all_queued_reqs(qpair);
        }
        nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);

        /* In interrupt mode, qpairs that were added to a poll group need an event
         * to kick off the handling of disconnected qpairs.
         */
        if (qpair->poll_group) {
                nvme_poll_group_write_disconnect_qpair_fd(qpair->poll_group->group);
        }
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
                                        struct spdk_memory_domain **domains, int array_size)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        assert(transport != NULL);
        if (transport->ops.ctrlr_get_memory_domains) {
                return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
        }

        return 0;
}
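
/*
 * Usage sketch (illustrative): at the public API level, callers usually probe
 * the domain count first by passing NULL/0, then fetch the actual pointers.
 *
 *        int n = spdk_nvme_ctrlr_get_memory_domains(ctrlr, NULL, 0);
 *
 *        if (n > 0) {
 *                struct spdk_memory_domain **domains = calloc(n, sizeof(*domains));
 *                spdk_nvme_ctrlr_get_memory_domains(ctrlr, domains, n);
 *        }
 */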

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport;

        if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
                qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
        } else {
                transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
                assert(transport != NULL);
                transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
        }
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport;

        if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
                return qpair->transport->ops.qpair_reset(qpair);
        }

        transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
        assert(transport != NULL);
        return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
        const struct spdk_nvme_transport *transport;

        if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
                return qpair->transport->ops.qpair_submit_request(qpair, req);
        }

        transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
        assert(transport != NULL);
        return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
        const struct spdk_nvme_transport *transport;

        if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
                return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
        }

        transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
        assert(transport != NULL);
        return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
                                      int (*iter_fn)(struct nvme_request *req, void *arg),
                                      void *arg)
{
        const struct spdk_nvme_transport *transport;

        if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
                return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
        }

        transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
        assert(transport != NULL);
        return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

int
nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport;

        transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
        if (transport->ops.qpair_authenticate == NULL) {
                return -ENOTSUP;
        }

        return transport->ops.qpair_authenticate(qpair);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

        assert(transport != NULL);
        transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
        struct spdk_nvme_transport_poll_group *group = NULL;

        group = transport->ops.poll_group_create();
        if (group) {
                group->transport = transport;
                STAILQ_INIT(&group->connected_qpairs);
                STAILQ_INIT(&group->disconnected_qpairs);
                group->num_connected_qpairs = 0;
        }

        return group;
}
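
/*
 * Usage sketch (illustrative; disconnected_cb is a hypothetical callback):
 * applications drive these transport poll groups indirectly through the
 * public spdk_nvme_poll_group API:
 *
 *        struct spdk_nvme_poll_group *pg = spdk_nvme_poll_group_create(NULL, NULL);
 *
 *        spdk_nvme_poll_group_add(pg, qpair);        // qpair must not be connected yet
 *        spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
 *        spdk_nvme_poll_group_process_completions(pg, 0, disconnected_cb);
 */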

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
                struct spdk_nvme_qpair *qpair)
{
        if (transport->ops.qpair_get_optimal_poll_group) {
                return transport->ops.qpair_get_optimal_poll_group(qpair);
        } else {
                return NULL;
        }
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
                              struct spdk_nvme_qpair *qpair)
{
        int rc;

        rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
        if (rc == 0) {
                qpair->poll_group = tgroup;
                assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
                qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
                STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
        }

        return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
                                 struct spdk_nvme_qpair *qpair)
{
        int rc __attribute__((unused));

        if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
                return -EINVAL;
        } else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
                return -ENOENT;
        }

        rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
        assert(rc == 0);

        STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

        qpair->poll_group = NULL;
        qpair->poll_group_tailq_head = NULL;

        return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
                uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
        return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
                        disconnected_qpair_cb);
}

void
nvme_transport_poll_group_check_disconnected_qpairs(struct spdk_nvme_transport_poll_group *tgroup,
                spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
        return tgroup->transport->ops.poll_group_check_disconnected_qpairs(tgroup,
                        disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
        return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
        struct spdk_nvme_transport_poll_group *tgroup;
        int rc __attribute__((unused));

        tgroup = qpair->poll_group;

        if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
                return 0;
        }

        if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
                rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
                assert(rc == 0);

                qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
                STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
                assert(tgroup->num_connected_qpairs > 0);
                tgroup->num_connected_qpairs--;
                STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

                return 0;
        }

        return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
        struct spdk_nvme_transport_poll_group *tgroup;
        int rc;

        tgroup = qpair->poll_group;

        if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
                return 0;
        }

        if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
                rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
                if (rc == 0) {
                        qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
                        STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
                        STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
                        tgroup->num_connected_qpairs++;
                }

                return rc == -EINPROGRESS ? 0 : rc;
        }

        return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
                                    struct spdk_nvme_transport_poll_group_stat **stats)
{
        if (tgroup->transport->ops.poll_group_get_stats) {
                return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
        }
        return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
                                     struct spdk_nvme_transport_poll_group_stat *stats)
{
        if (tgroup->transport->ops.poll_group_free_stats) {
                tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
        }
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
        return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
        if (opts == NULL) {
                SPDK_ERRLOG("opts should not be NULL.\n");
                return;
        }

        if (opts_size == 0) {
                SPDK_ERRLOG("opts_size should not be zero.\n");
                return;
        }

        opts->opts_size = opts_size;

#define SET_FIELD(field) \
        if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
                opts->field = g_spdk_nvme_transport_opts.field; \
        } \

        SET_FIELD(rdma_srq_size);
        SET_FIELD(rdma_max_cq_size);
        SET_FIELD(rdma_cm_event_timeout_ms);
        SET_FIELD(rdma_umr_per_io);

        /* Do not remove this statement. Always update it when adding a new field,
         * and do not forget to add the corresponding SET_FIELD statement. */
        SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
        if (opts == NULL) {
                SPDK_ERRLOG("opts should not be NULL.\n");
                return -EINVAL;
        }

        if (opts_size == 0) {
                SPDK_ERRLOG("opts_size should not be zero.\n");
                return -EINVAL;
        }

#define SET_FIELD(field) \
        if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
                g_spdk_nvme_transport_opts.field = opts->field; \
        } \

        SET_FIELD(rdma_srq_size);
        SET_FIELD(rdma_max_cq_size);
        SET_FIELD(rdma_cm_event_timeout_ms);
        SET_FIELD(rdma_umr_per_io);

        g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

        return 0;
}
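
/*
 * Usage sketch (illustrative): the opts_size handshake above keeps binaries
 * built against an older, smaller spdk_nvme_transport_opts working, since
 * fields beyond the caller's opts_size are ignored. A typical
 * read-modify-write of the global options:
 *
 *        struct spdk_nvme_transport_opts opts;
 *
 *        spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *        opts.rdma_srq_size = 1024;        // example value
 *        spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */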

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
        const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

        if (transport == NULL) {
                /* Transport does not exist. */
                return NULL;
        }

        if (transport->ops.ctrlr_get_registers) {
                return transport->ops.ctrlr_get_registers(ctrlr);
        }

        return NULL;
}