Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5 : * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 : */
7 :
8 : /*
9 : * NVMe transport abstraction
10 : */
11 :
12 : #include "nvme_internal.h"
13 : #include "spdk/queue.h"
14 :
15 : #define SPDK_MAX_NUM_OF_TRANSPORTS 16
16 :
/* A registered transport: a private copy of the provider's ops table plus
 * the link chaining it onto the global transport list. */
struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops ops;
	TAILQ_ENTRY(spdk_nvme_transport) link;
};
21 :
/* List of all registered transports, in registration order. */
TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

/* Fixed-size backing storage for transport objects; slots are handed out
 * sequentially by spdk_nvme_transport_register(). */
struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
/* Index of the next free slot in g_spdk_transports. */
int g_current_transport_index = 0;

/* Process-wide transport options; zero values mean "use transport defaults". */
struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
};
32 :
/* Return the first registered transport, or NULL if none are registered. */
const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}
38 :
/* Return the transport registered after the given one, or NULL at the end
 * of the list. */
const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}
44 :
45 : /*
46 : * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
47 : * transport object in either the controller struct or the admin qpair. This means
48 : * that a lot of admin related transport calls will have to call nvme_get_transport
49 : * in order to know which functions to call.
50 : * In the I/O path, we have the ability to store the transport struct in the I/O
51 : * qpairs to avoid taking a performance hit.
52 : */
53 : const struct spdk_nvme_transport *
54 4 : nvme_get_transport(const char *transport_name)
55 : {
56 : struct spdk_nvme_transport *registered_transport;
57 :
58 4 : TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
59 3 : if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
60 3 : return registered_transport;
61 : }
62 : }
63 :
64 1 : return NULL;
65 : }
66 :
67 : bool
68 0 : spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
69 : {
70 0 : return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) == NULL ? false : true;
71 : }
72 :
/* Return true if a transport with the given name is registered. */
bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}
78 :
79 : void
80 0 : spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
81 : {
82 : struct spdk_nvme_transport *new_transport;
83 :
84 0 : if (nvme_get_transport(ops->name)) {
85 0 : SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
86 0 : assert(false);
87 : }
88 :
89 0 : if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
90 0 : SPDK_ERRLOG("Unable to register new NVMe transport.\n");
91 0 : assert(false);
92 : return;
93 : }
94 0 : new_transport = &g_spdk_transports[g_current_transport_index++];
95 :
96 0 : new_transport->ops = *ops;
97 0 : TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
98 : }
99 :
100 0 : struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
101 : const struct spdk_nvme_ctrlr_opts *opts,
102 : void *devhandle)
103 : {
104 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
105 : struct spdk_nvme_ctrlr *ctrlr;
106 :
107 0 : if (transport == NULL) {
108 0 : SPDK_ERRLOG("Transport %s doesn't exist.", trid->trstring);
109 0 : return NULL;
110 : }
111 :
112 0 : ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);
113 :
114 0 : return ctrlr;
115 : }
116 :
117 : int
118 0 : nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
119 : bool direct_connect)
120 : {
121 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);
122 :
123 0 : if (transport == NULL) {
124 0 : SPDK_ERRLOG("Transport %s doesn't exist.", probe_ctx->trid.trstring);
125 0 : return -ENOENT;
126 : }
127 :
128 0 : return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
129 : }
130 :
/* Delegate controller teardown to the transport resolved from the
 * controller's transport string (see the multiprocess note above). */
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}
139 :
/* Delegate controller enable to the controller's transport. */
int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}
148 :
149 : int
150 0 : nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
151 : {
152 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
153 :
154 0 : assert(transport != NULL);
155 0 : if (transport->ops.ctrlr_ready) {
156 0 : return transport->ops.ctrlr_ready(ctrlr);
157 : }
158 :
159 0 : return 0;
160 : }
161 :
/* Synchronously write a 4-byte controller register via the transport. */
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}
170 :
/* Synchronously write an 8-byte controller register via the transport. */
int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}
179 :
/* Synchronously read a 4-byte controller register via the transport. */
int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}
188 :
/* Synchronously read an 8-byte controller register via the transport. */
int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}
197 :
198 : static int
199 0 : nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
200 : spdk_nvme_reg_cb cb_fn, void *cb_ctx)
201 : {
202 : struct nvme_register_completion *ctx;
203 :
204 0 : ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
205 0 : if (ctx == NULL) {
206 0 : return -ENOMEM;
207 : }
208 :
209 0 : ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
210 0 : ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
211 0 : ctx->cb_fn = cb_fn;
212 0 : ctx->cb_ctx = cb_ctx;
213 0 : ctx->value = value;
214 0 : ctx->pid = getpid();
215 :
216 0 : nvme_ctrlr_lock(ctrlr);
217 0 : STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
218 0 : nvme_ctrlr_unlock(ctrlr);
219 :
220 0 : return 0;
221 : }
222 :
223 : int
224 0 : nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
225 : spdk_nvme_reg_cb cb_fn, void *cb_arg)
226 : {
227 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
228 : int rc;
229 :
230 0 : assert(transport != NULL);
231 0 : if (transport->ops.ctrlr_set_reg_4_async == NULL) {
232 0 : rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
233 0 : if (rc != 0) {
234 0 : return rc;
235 : }
236 :
237 0 : return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
238 : }
239 :
240 0 : return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
241 : }
242 :
243 : int
244 0 : nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
245 : spdk_nvme_reg_cb cb_fn, void *cb_arg)
246 :
247 : {
248 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
249 : int rc;
250 :
251 0 : assert(transport != NULL);
252 0 : if (transport->ops.ctrlr_set_reg_8_async == NULL) {
253 0 : rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
254 0 : if (rc != 0) {
255 0 : return rc;
256 : }
257 :
258 0 : return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
259 : }
260 :
261 0 : return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
262 : }
263 :
264 : int
265 0 : nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
266 : spdk_nvme_reg_cb cb_fn, void *cb_arg)
267 : {
268 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
269 0 : uint32_t value;
270 : int rc;
271 :
272 0 : assert(transport != NULL);
273 0 : if (transport->ops.ctrlr_get_reg_4_async == NULL) {
274 0 : rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
275 0 : if (rc != 0) {
276 0 : return rc;
277 : }
278 :
279 0 : return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
280 : }
281 :
282 0 : return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
283 : }
284 :
285 : int
286 0 : nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
287 : spdk_nvme_reg_cb cb_fn, void *cb_arg)
288 : {
289 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
290 0 : uint64_t value;
291 : int rc;
292 :
293 0 : assert(transport != NULL);
294 0 : if (transport->ops.ctrlr_get_reg_8_async == NULL) {
295 0 : rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
296 0 : if (rc != 0) {
297 0 : return rc;
298 : }
299 :
300 0 : return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
301 : }
302 :
303 0 : return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
304 : }
305 :
/* Return the transport's maximum single-transfer size for this controller. */
uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}
314 :
/* Return the transport's maximum SGE count for this controller. */
uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}
323 :
324 : int
325 0 : nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
326 : {
327 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
328 :
329 0 : assert(transport != NULL);
330 0 : if (transport->ops.ctrlr_reserve_cmb != NULL) {
331 0 : return transport->ops.ctrlr_reserve_cmb(ctrlr);
332 : }
333 :
334 0 : return -ENOTSUP;
335 : }
336 :
337 : void *
338 0 : nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
339 : {
340 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
341 :
342 0 : assert(transport != NULL);
343 0 : if (transport->ops.ctrlr_map_cmb != NULL) {
344 0 : return transport->ops.ctrlr_map_cmb(ctrlr, size);
345 : }
346 :
347 0 : return NULL;
348 : }
349 :
350 : int
351 0 : nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
352 : {
353 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
354 :
355 0 : assert(transport != NULL);
356 0 : if (transport->ops.ctrlr_unmap_cmb != NULL) {
357 0 : return transport->ops.ctrlr_unmap_cmb(ctrlr);
358 : }
359 :
360 0 : return 0;
361 : }
362 :
363 : int
364 0 : nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
365 : {
366 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
367 :
368 0 : assert(transport != NULL);
369 0 : if (transport->ops.ctrlr_enable_pmr != NULL) {
370 0 : return transport->ops.ctrlr_enable_pmr(ctrlr);
371 : }
372 :
373 0 : return -ENOSYS;
374 : }
375 :
376 : int
377 0 : nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
378 : {
379 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
380 :
381 0 : assert(transport != NULL);
382 0 : if (transport->ops.ctrlr_disable_pmr != NULL) {
383 0 : return transport->ops.ctrlr_disable_pmr(ctrlr);
384 : }
385 :
386 0 : return -ENOSYS;
387 : }
388 :
389 : void *
390 0 : nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
391 : {
392 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
393 :
394 0 : assert(transport != NULL);
395 0 : if (transport->ops.ctrlr_map_pmr != NULL) {
396 0 : return transport->ops.ctrlr_map_pmr(ctrlr, size);
397 : }
398 :
399 0 : return NULL;
400 : }
401 :
402 : int
403 0 : nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
404 : {
405 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
406 :
407 0 : assert(transport != NULL);
408 0 : if (transport->ops.ctrlr_unmap_pmr != NULL) {
409 0 : return transport->ops.ctrlr_unmap_pmr(ctrlr);
410 : }
411 :
412 0 : return -ENOSYS;
413 : }
414 :
415 : struct spdk_nvme_qpair *
416 0 : nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
417 : const struct spdk_nvme_io_qpair_opts *opts)
418 : {
419 : struct spdk_nvme_qpair *qpair;
420 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
421 :
422 0 : assert(transport != NULL);
423 0 : qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
424 0 : if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
425 0 : qpair->transport = transport;
426 : }
427 :
428 0 : return qpair;
429 : }
430 :
/* Delete an I/O qpair. Failure here is a transport contract violation, not
 * a recoverable condition, so it is logged and asserted. */
void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport. For multi-process cases, a foreign process may delete
	 * the IO qpair, in which case the transport object would be invalid (each process has their
	 * own unique transport objects since they contain function pointers). So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}
451 :
/* Failure path for qpair connect: restore the failure reason that was saved
 * before the connect attempt and disconnect the qpair. Also used as the
 * disconnected-qpair callback while busy-waiting for fabrics connects. */
static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}
461 :
/* Connect a qpair through its transport. For synchronous (non-async) qpairs
 * this busy-waits until the qpair leaves the CONNECTING state. Returns 0 on
 * success, or 0 with the qpair left DISCONNECTING for async callers to poll. */
int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	/* Cache the transport on I/O qpairs (admin qpairs never cache it; see
	 * the multiprocess note near nvme_get_transport). */
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	/* Save the current failure reason so the fail path can restore it if
	 * this (re)connect attempt does not succeed. */
	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
517 :
/* Begin disconnecting a qpair. Idempotent: a qpair already disconnecting or
 * disconnected is left alone. The poll group is detached first (only by the
 * process that owns the qpair), then the transport tears down the connection. */
void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}
537 :
/* Transport callback invoked when a qpair disconnect has fully completed.
 * Queued requests are aborted only by the owning process (or always for the
 * admin qpair), then the qpair is marked DISCONNECTED. */
void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}
547 :
548 : int
549 2 : nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
550 : struct spdk_memory_domain **domains, int array_size)
551 : {
552 2 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
553 :
554 2 : assert(transport != NULL);
555 2 : if (transport->ops.ctrlr_get_memory_domains) {
556 1 : return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
557 : }
558 :
559 1 : return 0;
560 : }
561 :
562 : void
563 0 : nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
564 : {
565 : const struct spdk_nvme_transport *transport;
566 :
567 0 : if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
568 0 : qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
569 : } else {
570 0 : transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
571 0 : assert(transport != NULL);
572 0 : transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
573 : }
574 0 : }
575 :
576 : int
577 0 : nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
578 : {
579 : const struct spdk_nvme_transport *transport;
580 :
581 0 : if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
582 0 : return qpair->transport->ops.qpair_reset(qpair);
583 : }
584 :
585 0 : transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
586 0 : assert(transport != NULL);
587 0 : return transport->ops.qpair_reset(qpair);
588 : }
589 :
590 : int
591 0 : nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
592 : {
593 : const struct spdk_nvme_transport *transport;
594 :
595 0 : if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
596 0 : return qpair->transport->ops.qpair_submit_request(qpair, req);
597 : }
598 :
599 0 : transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
600 0 : assert(transport != NULL);
601 0 : return transport->ops.qpair_submit_request(qpair, req);
602 : }
603 :
604 : int32_t
605 0 : nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
606 : {
607 : const struct spdk_nvme_transport *transport;
608 :
609 0 : if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
610 0 : return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
611 : }
612 :
613 0 : transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
614 0 : assert(transport != NULL);
615 0 : return transport->ops.qpair_process_completions(qpair, max_completions);
616 : }
617 :
618 : int
619 0 : nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
620 : int (*iter_fn)(struct nvme_request *req, void *arg),
621 : void *arg)
622 : {
623 : const struct spdk_nvme_transport *transport;
624 :
625 0 : if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
626 0 : return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
627 : }
628 :
629 0 : transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
630 0 : assert(transport != NULL);
631 0 : return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
632 : }
633 :
/* Abort outstanding asynchronous event requests on the admin qpair. */
void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}
642 :
643 : struct spdk_nvme_transport_poll_group *
644 0 : nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
645 : {
646 0 : struct spdk_nvme_transport_poll_group *group = NULL;
647 :
648 0 : group = transport->ops.poll_group_create();
649 0 : if (group) {
650 0 : group->transport = transport;
651 0 : STAILQ_INIT(&group->connected_qpairs);
652 0 : STAILQ_INIT(&group->disconnected_qpairs);
653 0 : group->num_connected_qpairs = 0;
654 : }
655 :
656 0 : return group;
657 : }
658 :
659 : struct spdk_nvme_transport_poll_group *
660 0 : nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
661 : struct spdk_nvme_qpair *qpair)
662 : {
663 0 : if (transport->ops.qpair_get_optimal_poll_group) {
664 0 : return transport->ops.qpair_get_optimal_poll_group(qpair);
665 : } else {
666 0 : return NULL;
667 : }
668 : }
669 :
670 : int
671 1 : nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
672 : struct spdk_nvme_qpair *qpair)
673 : {
674 : int rc;
675 :
676 1 : rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
677 1 : if (rc == 0) {
678 1 : qpair->poll_group = tgroup;
679 1 : assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
680 1 : qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
681 1 : STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
682 : }
683 :
684 1 : return rc;
685 : }
686 :
/* Remove a qpair from its poll group. The qpair must be on the group's
 * disconnected list: -EINVAL if still connected, -ENOENT if it is on
 * neither list (i.e. not in this group). */
int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}
709 :
/* Poll all qpairs in the group for completions via the group's transport. */
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}
717 :
/* Destroy a poll group via its transport. */
int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}
723 :
/* Move a qpair from its poll group's connected list to the disconnected
 * list, notifying the transport. Idempotent for already-disconnected
 * qpairs; -EINVAL if the qpair is on neither list. */
int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	/* Already on the disconnected list: nothing to do. */
	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}
751 :
/* Move a qpair from its poll group's disconnected list to the connected
 * list, notifying the transport. Idempotent for already-connected qpairs;
 * -EINVAL if the qpair is on neither list. -EINPROGRESS from the transport
 * is reported as success (the connect completes asynchronously). */
int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	/* Already on the connected list: nothing to do. */
	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}


	return -EINVAL;
}
779 :
780 : int
781 0 : nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
782 : struct spdk_nvme_transport_poll_group_stat **stats)
783 : {
784 0 : if (tgroup->transport->ops.poll_group_get_stats) {
785 0 : return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
786 : }
787 0 : return -ENOTSUP;
788 : }
789 :
790 : void
791 0 : nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
792 : struct spdk_nvme_transport_poll_group_stat *stats)
793 : {
794 0 : if (tgroup->transport->ops.poll_group_free_stats) {
795 0 : tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
796 : }
797 0 : }
798 :
/* Return the transport type (PCIe, RDMA, TCP, ...) of a transport object. */
spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}
804 :
/* Copy the process-wide transport options into the caller's structure.
 * Only fields that fit within opts_size are copied, so older callers with
 * smaller structures remain ABI-compatible. */
void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

/* Copy a field only if it lies entirely within the caller's opts_size. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);

	/* Do not remove this statement, you should always update this statement when you adding a new field,
	 * and do not forget to add the SET_FIELD statement for your added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}
834 :
/* Update the process-wide transport options from the caller's structure.
 * Only fields that fit within opts->opts_size are read, so older callers
 * with smaller structures remain ABI-compatible. Returns -EINVAL on bad
 * arguments. */
int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

/* Read a field only if it lies entirely within the caller's opts_size. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}
862 :
863 : volatile struct spdk_nvme_registers *
864 0 : spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
865 : {
866 0 : const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
867 :
868 0 : if (transport == NULL) {
869 : /* Transport does not exist. */
870 0 : return NULL;
871 : }
872 :
873 0 : if (transport->ops.ctrlr_get_registers) {
874 0 : return transport->ops.ctrlr_get_registers(ctrlr);
875 : }
876 :
877 0 : return NULL;
878 : }
|