/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops ops;
	TAILQ_ENTRY(spdk_nvme_transport) link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that a lot of admin-related transport calls will have to call nvme_get_transport
 * in order to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
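
/*
 * Example (sketch): transports normally do not call spdk_nvme_transport_register()
 * directly; they register themselves at load time through the
 * SPDK_NVME_TRANSPORT_REGISTER() constructor macro from spdk/nvme.h. A
 * hypothetical transport named "MYTR" might do:
 *
 *	static const struct spdk_nvme_transport_ops mytr_ops = {
 *		.name = "MYTR",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = mytr_ctrlr_construct,
 *		(remaining mandatory callbacks elided in this sketch)
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(mytr, &mytr_ops);
 */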

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	if (transport->ops.ctrlr_scan_attached != NULL) {
		return transport->ops.ctrlr_scan_attached(probe_ctx);
	}
	SPDK_ERRLOG("Transport %s does not support ctrlr_scan_attached callback\n",
		    probe_ctx->trid.trstring);
	return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_interrupts != NULL) {
		return transport->ops.ctrlr_enable_interrupts(ctrlr);
	}

	return -ENOTSUP;
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_ctrlr_lock(ctrlr);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport. For multi-process cases, a foreign process may delete
	 * the IO qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects since they contain function pointers). So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
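
/*
 * Example (sketch, assuming the public async_mode knob): whether the busy-wait
 * above is taken depends on qpair->async, which I/O qpairs inherit from
 * spdk_nvme_io_qpair_opts.async_mode, e.g.:
 *
 *	struct spdk_nvme_io_qpair_opts opts;
 *
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *	opts.async_mode = true;
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 */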

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_qpair_get_fd(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			    struct spdk_event_handler_opts *opts)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.qpair_get_fd != NULL) {
		return transport->ops.qpair_get_fd(qpair, opts);
	}

	return -ENOTSUP;
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);

	/* In interrupt mode, qpairs that are added to a poll group need an event for the
	 * disconnected qpair handling to kick in.
	 */
	if (qpair->poll_group) {
		nvme_poll_group_write_disconnect_qpair_fd(qpair->poll_group->group);
	}
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

int
nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	if (transport->ops.qpair_authenticate == NULL) {
		return -ENOTSUP;
	}

	return transport->ops.qpair_authenticate(qpair);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}
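
/*
 * Example (sketch): these poll group ops sit behind the public
 * spdk_nvme_poll_group API. A reactor thread typically drives them roughly
 * like this (qpairs must be added while still disconnected, per the assert in
 * nvme_transport_poll_group_add() above):
 *
 *	struct spdk_nvme_poll_group *group;
 *
 *	group = spdk_nvme_poll_group_create(NULL, NULL);
 *	spdk_nvme_poll_group_add(group, qpair);
 *	for (;;) {
 *		spdk_nvme_poll_group_process_completions(group, 0,
 *							 disconnected_qpair_cb);
 *	}
 */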

void
nvme_transport_poll_group_check_disconnected_qpairs(struct spdk_nvme_transport_poll_group *tgroup,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	tgroup->transport->ops.poll_group_check_disconnected_qpairs(tgroup,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}
	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	/* Do not remove this statement. Always update it when adding a new field,
	 * and do not forget to add a SET_FIELD statement for the new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}
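
/*
 * Example (sketch): the offsetof()-guarded copies above make the opts struct
 * ABI-versioned, so callers pass the sizeof() of the struct they were compiled
 * against and older, smaller layouts keep working. A typical get/modify/set
 * round trip:
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 1024;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */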

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}
|