Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "nvmf_internal.h"
10 : #include "transport.h"
11 :
12 : #include "spdk/config.h"
13 : #include "spdk/log.h"
14 : #include "spdk/nvmf.h"
15 : #include "spdk/nvmf_transport.h"
16 : #include "spdk/queue.h"
17 : #include "spdk/util.h"
18 : #include "spdk_internal/usdt.h"
19 :
20 : #define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000
21 :
22 : struct nvmf_transport_ops_list_element {
23 : struct spdk_nvmf_transport_ops ops;
24 : TAILQ_ENTRY(nvmf_transport_ops_list_element) link;
25 : };
26 :
27 : TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
28 : g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);
29 :
30 : static inline const struct spdk_nvmf_transport_ops *
31 22 : nvmf_get_transport_ops(const char *transport_name)
32 : {
33 : struct nvmf_transport_ops_list_element *ops;
34 40 : TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
35 31 : if (strcasecmp(transport_name, ops->ops.name) == 0) {
36 13 : return &ops->ops;
37 : }
38 : }
39 9 : return NULL;
40 : }
41 :
42 : void
43 7 : spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
44 : {
45 : struct nvmf_transport_ops_list_element *new_ops;
46 :
47 7 : if (nvmf_get_transport_ops(ops->name) != NULL) {
48 0 : SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
49 0 : assert(false);
50 : return;
51 : }
52 :
53 7 : new_ops = calloc(1, sizeof(*new_ops));
54 7 : if (new_ops == NULL) {
55 0 : SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
56 0 : assert(false);
57 : return;
58 : }
59 :
60 7 : new_ops->ops = *ops;
61 :
62 7 : TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
63 : }
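/*
 * Illustrative sketch (not part of the covered source): a custom transport
 * module registers itself by filling in a struct spdk_nvmf_transport_ops and
 * handing it to spdk_nvmf_transport_register(), typically at load time via
 * the SPDK_NVMF_TRANSPORT_REGISTER() constructor macro from
 * spdk/nvmf_transport.h. The my_transport_* callbacks below are hypothetical
 * placeholders; a real transport must implement the full set of mandatory
 * callbacks (create/destroy, listen/stop_listen, poll group handling,
 * request completion, qpair teardown, ...).
 */
#if 0 /* example only */
static const struct spdk_nvmf_transport_ops my_transport_ops = {
	.name = "MYTRANSPORT",
	.type = SPDK_NVME_TRANSPORT_CUSTOM,
	.opts_init = my_transport_opts_init,
	.create = my_transport_create,
	.destroy = my_transport_destroy,
	.listen = my_transport_listen,
	.stop_listen = my_transport_stop_listen,
	.poll_group_create = my_transport_poll_group_create,
	.poll_group_destroy = my_transport_poll_group_destroy,
	.poll_group_add = my_transport_poll_group_add,
	.poll_group_poll = my_transport_poll_group_poll,
	.req_free = my_transport_req_free,
	.req_complete = my_transport_req_complete,
	.qpair_fini = my_transport_qpair_fini,
};

SPDK_NVMF_TRANSPORT_REGISTER(my_transport, &my_transport_ops);
#endif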
64 :
65 : const struct spdk_nvmf_transport_opts *
66 0 : spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
67 : {
68 0 : return &transport->opts;
69 : }
70 :
71 : void
72 0 : nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
73 : bool named)
74 : {
75 0 : const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);
76 :
77 0 : named ? spdk_json_write_named_object_begin(w, "params") : spdk_json_write_object_begin(w);
78 :
79 0 : spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
80 0 : spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
81 0 : spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
82 0 : spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
83 0 : spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
84 0 : spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
85 0 : spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
86 0 : spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
87 0 : spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
88 0 : spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
89 0 : spdk_json_write_named_bool(w, "zcopy", opts->zcopy);
90 :
91 0 : if (transport->ops->dump_opts) {
92 0 : transport->ops->dump_opts(transport, w);
93 : }
94 :
95 0 : spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
96 0 : spdk_json_write_named_uint32(w, "ack_timeout", opts->ack_timeout);
97 0 : spdk_json_write_named_uint32(w, "data_wr_pool_size", opts->data_wr_pool_size);
98 0 : spdk_json_write_object_end(w);
99 0 : }
100 :
101 : void
102 0 : nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
103 : struct spdk_json_write_ctx *w)
104 : {
105 0 : const char *adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);
106 :
107 0 : spdk_json_write_named_string(w, "trtype", trid->trstring);
108 0 : spdk_json_write_named_string(w, "adrfam", adrfam ? adrfam : "unknown");
109 0 : spdk_json_write_named_string(w, "traddr", trid->traddr);
110 0 : spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
111 0 : }
112 :
113 : spdk_nvme_transport_type_t
114 0 : spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
115 : {
116 0 : return transport->ops->type;
117 : }
118 :
119 : const char *
120 0 : spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
121 : {
122 0 : return transport->ops->name;
123 : }
124 :
125 : static void
126 8 : nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
127 : struct spdk_nvmf_transport_opts *opts_src,
128 : size_t opts_size)
129 : {
130 8 : assert(opts);
131 8 : assert(opts_src);
132 :
133 8 : opts->opts_size = opts_size;
134 :
135 : #define SET_FIELD(field) \
136 : if (offsetof(struct spdk_nvmf_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
137 : opts->field = opts_src->field; \
138 : } \
139 :
140 8 : SET_FIELD(max_queue_depth);
141 8 : SET_FIELD(max_qpairs_per_ctrlr);
142 8 : SET_FIELD(in_capsule_data_size);
143 8 : SET_FIELD(max_io_size);
144 8 : SET_FIELD(io_unit_size);
145 8 : SET_FIELD(max_aq_depth);
146 8 : SET_FIELD(buf_cache_size);
147 8 : SET_FIELD(num_shared_buffers);
148 8 : SET_FIELD(dif_insert_or_strip);
149 8 : SET_FIELD(abort_timeout_sec);
150 8 : SET_FIELD(association_timeout);
151 8 : SET_FIELD(transport_specific);
152 8 : SET_FIELD(acceptor_poll_rate);
153 8 : SET_FIELD(zcopy);
154 8 : SET_FIELD(ack_timeout);
155 8 : SET_FIELD(data_wr_pool_size);
156 :
157 : /* Do not remove this statement. Always update it when adding a new field,
158 : * and do not forget to add a SET_FIELD statement for the new field. */
159 : SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 72, "Incorrect size");
160 :
161 : #undef SET_FIELD
163 8 : }
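/*
 * Illustrative sketch (not part of the covered source): when a new field is
 * appended to struct spdk_nvmf_transport_opts, the copy helper above must
 * gain a matching SET_FIELD() line and the static assert must be bumped to
 * the new structure size, e.g. (hypothetical field and size):
 *
 *	SET_FIELD(new_feature_flag);
 *	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == <new size>,
 *			   "Incorrect size");
 */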
164 :
165 : struct nvmf_transport_create_ctx {
166 : const struct spdk_nvmf_transport_ops *ops;
167 : struct spdk_nvmf_transport_opts opts;
168 : void *cb_arg;
169 : spdk_nvmf_transport_create_done_cb cb_fn;
170 : };
171 :
172 : static bool
173 34 : nvmf_transport_use_iobuf(struct spdk_nvmf_transport *transport)
174 : {
175 34 : return transport->opts.num_shared_buffers || transport->opts.buf_cache_size;
176 : }
177 :
178 : static void
179 4 : nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *transport)
180 : {
181 4 : struct nvmf_transport_create_ctx *ctx = cb_arg;
182 : int chars_written;
183 :
184 4 : if (!transport) {
185 0 : SPDK_ERRLOG("Failed to create transport.\n");
186 0 : goto err;
187 : }
188 :
189 4 : pthread_mutex_init(&transport->mutex, NULL);
190 4 : TAILQ_INIT(&transport->listeners);
191 4 : transport->ops = ctx->ops;
192 4 : transport->opts = ctx->opts;
193 4 : chars_written = snprintf(transport->iobuf_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s", "nvmf",
194 4 : transport->ops->name);
195 4 : if (chars_written < 0) {
196 0 : SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
197 0 : goto err;
198 : }
199 :
200 4 : if (nvmf_transport_use_iobuf(transport)) {
201 3 : spdk_iobuf_register_module(transport->iobuf_name);
202 : }
203 :
204 4 : ctx->cb_fn(ctx->cb_arg, transport);
205 4 : free(ctx);
206 4 : return;
207 :
208 0 : err:
209 0 : if (transport) {
210 0 : transport->ops->destroy(transport, NULL, NULL);
211 : }
212 :
213 0 : ctx->cb_fn(ctx->cb_arg, NULL);
214 0 : free(ctx);
215 : }
216 :
217 : static void
218 1 : _nvmf_transport_create_done(void *ctx)
219 : {
220 1 : struct nvmf_transport_create_ctx *_ctx = (struct nvmf_transport_create_ctx *)ctx;
221 :
222 1 : nvmf_transport_create_async_done(_ctx, _ctx->ops->create(&_ctx->opts));
223 1 : }
224 :
225 : static int
226 8 : nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
227 : spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg, bool sync)
228 : {
229 : struct nvmf_transport_create_ctx *ctx;
230 8 : struct spdk_iobuf_opts opts_iobuf = {};
231 : int rc;
232 : uint64_t count;
233 :
234 8 : ctx = calloc(1, sizeof(*ctx));
235 8 : if (!ctx) {
236 0 : return -ENOMEM;
237 : }
238 :
239 8 : if (!opts) {
240 0 : SPDK_ERRLOG("opts should not be NULL\n");
241 0 : goto err;
242 : }
243 :
244 8 : if (!opts->opts_size) {
245 0 : SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
246 0 : goto err;
247 : }
248 :
249 8 : ctx->ops = nvmf_get_transport_ops(transport_name);
250 8 : if (!ctx->ops) {
251 1 : SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
252 1 : goto err;
253 : }
254 :
255 7 : nvmf_transport_opts_copy(&ctx->opts, opts, opts->opts_size);
256 7 : if (ctx->opts.max_io_size != 0 && (!spdk_u32_is_pow2(ctx->opts.max_io_size) ||
257 6 : ctx->opts.max_io_size < 8192)) {
258 1 : SPDK_ERRLOG("max_io_size %u must be a power of 2 and be greater than or equal 8KB\n",
259 : ctx->opts.max_io_size);
260 1 : goto err;
261 : }
262 :
263 6 : if (ctx->opts.max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
264 1 : SPDK_ERRLOG("max_aq_depth %u is less than minimum defined by NVMf spec, use min value\n",
265 : ctx->opts.max_aq_depth);
266 1 : ctx->opts.max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
267 : }
268 :
269 6 : spdk_iobuf_get_opts(&opts_iobuf, sizeof(opts_iobuf));
270 6 : if (ctx->opts.io_unit_size == 0) {
271 1 : SPDK_ERRLOG("io_unit_size cannot be 0\n");
272 1 : goto err;
273 : }
274 5 : if (ctx->opts.io_unit_size > opts_iobuf.large_bufsize) {
275 1 : SPDK_ERRLOG("io_unit_size %u is larger than iobuf pool large buffer size %d\n",
276 : ctx->opts.io_unit_size, opts_iobuf.large_bufsize);
277 1 : goto err;
278 : }
279 :
280 4 : if (ctx->opts.io_unit_size <= opts_iobuf.small_bufsize) {
281 : /* We'll be using the small buffer pool only */
282 1 : count = opts_iobuf.small_pool_count;
283 : } else {
284 3 : count = spdk_min(opts_iobuf.small_pool_count, opts_iobuf.large_pool_count);
285 : }
286 :
287 4 : if (ctx->opts.num_shared_buffers > count) {
288 0 : SPDK_WARNLOG("The num_shared_buffers value (%u) is larger than the available iobuf"
289 : " pool size (%lu). Please increase the iobuf pool sizes.\n",
290 : ctx->opts.num_shared_buffers, count);
291 : }
292 :
293 4 : ctx->cb_fn = cb_fn;
294 4 : ctx->cb_arg = cb_arg;
295 :
296 : /* Prioritize sync create operation. */
297 4 : if (ctx->ops->create) {
298 1 : if (sync) {
299 1 : _nvmf_transport_create_done(ctx);
300 1 : return 0;
301 : }
302 :
303 0 : rc = spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx);
304 0 : if (rc) {
305 0 : goto err;
306 : }
307 :
308 0 : return 0;
309 : }
310 :
311 3 : assert(ctx->ops->create_async);
312 3 : rc = ctx->ops->create_async(&ctx->opts, nvmf_transport_create_async_done, ctx);
313 3 : if (rc) {
314 0 : SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
315 0 : goto err;
316 : }
317 :
318 3 : return 0;
319 4 : err:
320 4 : free(ctx);
321 4 : return -1;
322 : }
323 :
324 : int
325 7 : spdk_nvmf_transport_create_async(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
326 : spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg)
327 : {
328 7 : return nvmf_transport_create(transport_name, opts, cb_fn, cb_arg, false);
329 : }
330 :
331 : static void
332 1 : nvmf_transport_create_sync_done(void *cb_arg, struct spdk_nvmf_transport *transport)
333 : {
334 1 : struct spdk_nvmf_transport **_transport = cb_arg;
335 :
336 1 : *_transport = transport;
337 1 : }
338 :
339 : struct spdk_nvmf_transport *
340 1 : spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
341 : {
342 1 : struct spdk_nvmf_transport *transport = NULL;
343 :
344 : /* The current implementation supports only the synchronous version of the create operation. */
345 1 : assert(nvmf_get_transport_ops(transport_name) && nvmf_get_transport_ops(transport_name)->create);
346 :
347 1 : nvmf_transport_create(transport_name, opts, nvmf_transport_create_sync_done, &transport, true);
348 1 : return transport;
349 : }
350 :
351 : struct spdk_nvmf_transport *
352 13 : spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
353 : {
354 13 : return TAILQ_FIRST(&tgt->transports);
355 : }
356 :
357 : struct spdk_nvmf_transport *
358 1 : spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
359 : {
360 1 : return TAILQ_NEXT(transport, link);
361 : }
362 :
363 : int
364 3 : spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
365 : spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
366 : {
367 : struct spdk_nvmf_listener *listener, *listener_tmp;
368 :
369 3 : TAILQ_FOREACH_SAFE(listener, &transport->listeners, link, listener_tmp) {
370 0 : TAILQ_REMOVE(&transport->listeners, listener, link);
371 0 : transport->ops->stop_listen(transport, &listener->trid);
372 0 : free(listener);
373 : }
374 :
375 3 : if (nvmf_transport_use_iobuf(transport)) {
376 3 : spdk_iobuf_unregister_module(transport->iobuf_name);
377 : }
378 :
379 3 : pthread_mutex_destroy(&transport->mutex);
380 3 : return transport->ops->destroy(transport, cb_fn, cb_arg);
381 : }
382 :
383 : struct spdk_nvmf_listener *
384 11 : nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
385 : const struct spdk_nvme_transport_id *trid)
386 : {
387 : struct spdk_nvmf_listener *listener;
388 :
389 11 : TAILQ_FOREACH(listener, &transport->listeners, link) {
390 6 : if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
391 6 : return listener;
392 : }
393 : }
394 :
395 5 : return NULL;
396 : }
397 :
398 : int
399 3 : spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
400 : const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts)
401 : {
402 : struct spdk_nvmf_listener *listener;
403 : int rc;
404 :
405 3 : listener = nvmf_transport_find_listener(transport, trid);
406 3 : if (!listener) {
407 2 : listener = calloc(1, sizeof(*listener));
408 2 : if (!listener) {
409 0 : return -ENOMEM;
410 : }
411 :
412 2 : listener->ref = 1;
413 2 : listener->trid = *trid;
414 2 : listener->sock_impl = opts->sock_impl;
415 2 : TAILQ_INSERT_TAIL(&transport->listeners, listener, link);
416 2 : pthread_mutex_lock(&transport->mutex);
417 2 : rc = transport->ops->listen(transport, &listener->trid, opts);
418 2 : pthread_mutex_unlock(&transport->mutex);
419 2 : if (rc != 0) {
420 1 : TAILQ_REMOVE(&transport->listeners, listener, link);
421 1 : free(listener);
422 : }
423 2 : return rc;
424 : }
425 :
426 1 : if (opts->sock_impl && strncmp(opts->sock_impl, listener->sock_impl, strlen(listener->sock_impl))) {
427 0 : SPDK_ERRLOG("opts->sock_impl: '%s' doesn't match listener->sock_impl: '%s'\n", opts->sock_impl,
428 : listener->sock_impl);
429 0 : return -EINVAL;
430 : }
431 :
432 1 : ++listener->ref;
433 :
434 1 : return 0;
435 : }
436 :
437 : int
438 3 : spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
439 : const struct spdk_nvme_transport_id *trid)
440 : {
441 : struct spdk_nvmf_listener *listener;
442 :
443 3 : listener = nvmf_transport_find_listener(transport, trid);
444 3 : if (!listener) {
445 1 : return -ENOENT;
446 : }
447 :
448 2 : if (--listener->ref == 0) {
449 1 : TAILQ_REMOVE(&transport->listeners, listener, link);
450 1 : pthread_mutex_lock(&transport->mutex);
451 1 : transport->ops->stop_listen(transport, trid);
452 1 : pthread_mutex_unlock(&transport->mutex);
453 1 : free(listener);
454 : }
455 :
456 2 : return 0;
457 : }
458 :
459 : struct nvmf_stop_listen_ctx {
460 : struct spdk_nvmf_transport *transport;
461 : struct spdk_nvme_transport_id trid;
462 : struct spdk_nvmf_subsystem *subsystem;
463 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
464 : void *cb_arg;
465 : };
466 :
467 : static void
468 0 : nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
469 : {
470 : struct nvmf_stop_listen_ctx *ctx;
471 : struct spdk_nvmf_transport *transport;
472 0 : int rc = status;
473 :
474 0 : ctx = spdk_io_channel_iter_get_ctx(i);
475 0 : transport = ctx->transport;
476 0 : assert(transport != NULL);
477 :
478 0 : rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
479 0 : if (rc) {
480 0 : SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
481 : }
482 :
483 0 : if (ctx->cb_fn) {
484 0 : ctx->cb_fn(ctx->cb_arg, rc);
485 : }
486 0 : free(ctx);
487 0 : }
488 :
489 : static void nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i);
490 :
491 : static void
492 0 : nvmf_stop_listen_disconnect_qpairs_msg(void *ctx)
493 : {
494 0 : nvmf_stop_listen_disconnect_qpairs((struct spdk_io_channel_iter *)ctx);
495 0 : }
496 :
497 : static void
498 0 : nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
499 : {
500 : struct nvmf_stop_listen_ctx *ctx;
501 : struct spdk_nvmf_poll_group *group;
502 : struct spdk_io_channel *ch;
503 : struct spdk_nvmf_qpair *qpair, *tmp_qpair;
504 0 : struct spdk_nvme_transport_id tmp_trid;
505 0 : bool qpair_found = false;
506 :
507 0 : ctx = spdk_io_channel_iter_get_ctx(i);
508 0 : ch = spdk_io_channel_iter_get_channel(i);
509 0 : group = spdk_io_channel_get_ctx(ch);
510 :
511 0 : TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
512 0 : if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
513 0 : continue;
514 : }
515 :
516 : /* Disconnect qpairs that match the listen trid and belong to the given
517 : * subsystem. If ctx->subsystem is NULL, disconnect all qpairs that match
518 : * the listen trid. */
519 0 : if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
520 0 : if (ctx->subsystem == NULL ||
521 0 : (qpair->ctrlr != NULL && ctx->subsystem == qpair->ctrlr->subsys)) {
522 0 : spdk_nvmf_qpair_disconnect(qpair);
523 0 : qpair_found = true;
524 : }
525 : }
526 : }
527 0 : if (qpair_found) {
528 0 : spdk_thread_send_msg(spdk_get_thread(), nvmf_stop_listen_disconnect_qpairs_msg, i);
529 0 : return;
530 : }
531 :
532 0 : spdk_for_each_channel_continue(i, 0);
533 : }
534 :
535 : int
536 0 : spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
537 : const struct spdk_nvme_transport_id *trid,
538 : struct spdk_nvmf_subsystem *subsystem,
539 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
540 : void *cb_arg)
541 : {
542 : struct nvmf_stop_listen_ctx *ctx;
543 :
544 0 : if (trid->subnqn[0] != '\0') {
545 0 : SPDK_ERRLOG("subnqn should be empty, use subsystem pointer instead\n");
546 0 : return -EINVAL;
547 : }
548 :
549 0 : ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
550 0 : if (ctx == NULL) {
551 0 : return -ENOMEM;
552 : }
553 :
554 0 : ctx->trid = *trid;
555 0 : ctx->subsystem = subsystem;
556 0 : ctx->transport = transport;
557 0 : ctx->cb_fn = cb_fn;
558 0 : ctx->cb_arg = cb_arg;
559 :
560 0 : spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
561 : nvmf_stop_listen_fini);
562 :
563 0 : return 0;
564 : }
565 :
566 : void
567 0 : nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
568 : struct spdk_nvme_transport_id *trid,
569 : struct spdk_nvmf_discovery_log_page_entry *entry)
570 : {
571 0 : transport->ops->listener_discover(transport, trid, entry);
572 0 : }
573 :
574 : static int
575 0 : nvmf_tgroup_poll(void *arg)
576 : {
577 0 : struct spdk_nvmf_transport_poll_group *tgroup = arg;
578 : int rc;
579 :
580 0 : rc = nvmf_transport_poll_group_poll(tgroup);
581 0 : return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
582 : }
583 :
584 : static void
585 2 : nvmf_transport_poll_group_create_poller(struct spdk_nvmf_transport_poll_group *tgroup)
586 : {
587 2 : char poller_name[SPDK_NVMF_TRSTRING_MAX_LEN + 32];
588 :
589 2 : snprintf(poller_name, sizeof(poller_name), "nvmf_%s", tgroup->transport->ops->name);
590 2 : tgroup->poller = spdk_poller_register_named(nvmf_tgroup_poll, tgroup, 0, poller_name);
591 2 : spdk_poller_register_interrupt(tgroup->poller, NULL, NULL);
592 2 : }
593 :
594 : struct spdk_nvmf_transport_poll_group *
595 2 : nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
596 : struct spdk_nvmf_poll_group *group)
597 : {
598 : struct spdk_nvmf_transport_poll_group *tgroup;
599 2 : struct spdk_iobuf_opts opts_iobuf = {};
600 : uint32_t buf_cache_size, small_cache_size, large_cache_size;
601 : int rc;
602 :
603 2 : pthread_mutex_lock(&transport->mutex);
604 2 : tgroup = transport->ops->poll_group_create(transport, group);
605 2 : pthread_mutex_unlock(&transport->mutex);
606 2 : if (!tgroup) {
607 0 : return NULL;
608 : }
609 2 : tgroup->transport = transport;
610 2 : nvmf_transport_poll_group_create_poller(tgroup);
611 :
612 2 : STAILQ_INIT(&tgroup->pending_buf_queue);
613 :
614 2 : if (!nvmf_transport_use_iobuf(transport)) {
615 : /* We aren't going to allocate any shared buffers or cache, so just return now. */
616 0 : return tgroup;
617 : }
618 :
619 2 : buf_cache_size = transport->opts.buf_cache_size;
620 :
621 : /* buf_cache_size of UINT32_MAX means the value should be calculated dynamically
622 : * based on the number of buffers in the shared pool and the number of poll groups
623 : * that are sharing them. We allocate 75% of the pool for the cache, and then
624 : * divide that by number of poll groups to determine the buf_cache_size for this
625 : * poll group.
626 : */
627 2 : if (buf_cache_size == UINT32_MAX) {
628 0 : uint32_t num_shared_buffers = transport->opts.num_shared_buffers;
629 :
630 : /* Theoretically the nvmf library can dynamically add poll groups to
631 : * the target after transports have already been created. We don't try
632 : * to handle this case efficiently; we just do enough here to avoid a
633 : * divide-by-zero.
634 : */
635 0 : uint16_t num_poll_groups = group->tgt->num_poll_groups ? : spdk_env_get_core_count();
636 :
637 0 : buf_cache_size = (num_shared_buffers * 3 / 4) / num_poll_groups;
638 : }
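/*
 * Worked example (illustrative numbers, not from the covered source): with
 * num_shared_buffers = 4096 and 4 poll groups, 75% of the pool is
 * 4096 * 3 / 4 = 3072 buffers, so each poll group ends up with a
 * buf_cache_size of 3072 / 4 = 768.
 */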
639 :
640 2 : spdk_iobuf_get_opts(&opts_iobuf, sizeof(opts_iobuf));
641 2 : small_cache_size = buf_cache_size;
642 2 : if (transport->opts.io_unit_size <= opts_iobuf.small_bufsize) {
643 2 : large_cache_size = 0;
644 : } else {
645 0 : large_cache_size = buf_cache_size;
646 : }
647 :
648 2 : tgroup->buf_cache = calloc(1, sizeof(*tgroup->buf_cache));
649 2 : if (!tgroup->buf_cache) {
650 0 : SPDK_ERRLOG("Unable to allocate an iobuf channel in the poll group.\n");
651 0 : goto err;
652 : }
653 :
654 2 : rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, small_cache_size,
655 : large_cache_size);
656 2 : if (rc != 0) {
657 0 : SPDK_ERRLOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
658 0 : rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, 0, 0);
659 0 : if (rc != 0) {
660 0 : SPDK_ERRLOG("Unable to create an iobuf channel in the poll group.\n");
661 0 : goto err;
662 : }
663 : }
664 :
665 2 : return tgroup;
666 0 : err:
667 0 : transport->ops->poll_group_destroy(tgroup);
668 0 : return NULL;
669 : }
670 :
671 : struct spdk_nvmf_transport_poll_group *
672 0 : nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
673 : struct spdk_nvmf_qpair *qpair)
674 : {
675 : struct spdk_nvmf_transport_poll_group *tgroup;
676 :
677 0 : if (transport->ops->get_optimal_poll_group) {
678 0 : pthread_mutex_lock(&transport->mutex);
679 0 : tgroup = transport->ops->get_optimal_poll_group(qpair);
680 0 : pthread_mutex_unlock(&transport->mutex);
681 :
682 0 : return tgroup;
683 : } else {
684 0 : return NULL;
685 : }
686 : }
687 :
688 : void
689 2 : nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
690 : {
691 : struct spdk_nvmf_transport *transport;
692 2 : struct spdk_iobuf_channel *ch = NULL;
693 :
694 2 : transport = group->transport;
695 :
696 2 : spdk_poller_unregister(&group->poller);
697 :
698 2 : if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
699 0 : SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
700 : }
701 :
702 2 : if (nvmf_transport_use_iobuf(transport)) {
703 : /* The call to poll_group_destroy not only frees the group memory but also
704 : * releases any remaining buffers. Cache the channel pointer so we can still
705 : * release the resources after the group has been freed. */
706 2 : ch = group->buf_cache;
707 : }
708 :
709 2 : pthread_mutex_lock(&transport->mutex);
710 2 : transport->ops->poll_group_destroy(group);
711 2 : pthread_mutex_unlock(&transport->mutex);
712 :
713 2 : if (nvmf_transport_use_iobuf(transport)) {
714 2 : spdk_iobuf_channel_fini(ch);
715 2 : free(ch);
716 : }
717 2 : }
718 :
719 : void
720 0 : nvmf_transport_poll_group_pause(struct spdk_nvmf_transport_poll_group *tgroup)
721 : {
722 0 : spdk_poller_unregister(&tgroup->poller);
723 0 : }
724 :
725 : void
726 0 : nvmf_transport_poll_group_resume(struct spdk_nvmf_transport_poll_group *tgroup)
727 : {
728 0 : nvmf_transport_poll_group_create_poller(tgroup);
729 0 : }
730 :
731 : int
732 0 : nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
733 : struct spdk_nvmf_qpair *qpair)
734 : {
735 0 : if (qpair->transport) {
736 0 : assert(qpair->transport == group->transport);
737 0 : if (qpair->transport != group->transport) {
738 0 : return -1;
739 : }
740 : } else {
741 0 : qpair->transport = group->transport;
742 : }
743 :
744 : SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_add, qpair, qpair->qid,
745 : spdk_thread_get_id(group->group->thread));
746 :
747 0 : return group->transport->ops->poll_group_add(group, qpair);
748 : }
749 :
750 : int
751 0 : nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
752 : struct spdk_nvmf_qpair *qpair)
753 : {
754 0 : int rc = ENOTSUP;
755 :
756 : SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_remove, qpair, qpair->qid,
757 : spdk_thread_get_id(group->group->thread));
758 :
759 0 : assert(qpair->transport == group->transport);
760 0 : if (group->transport->ops->poll_group_remove) {
761 0 : rc = group->transport->ops->poll_group_remove(group, qpair);
762 : }
763 :
764 0 : return rc;
765 : }
766 :
767 : int
768 0 : nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
769 : {
770 0 : return group->transport->ops->poll_group_poll(group);
771 : }
772 :
773 : int
774 0 : nvmf_transport_req_free(struct spdk_nvmf_request *req)
775 : {
776 0 : return req->qpair->transport->ops->req_free(req);
777 : }
778 :
779 : int
780 0 : nvmf_transport_req_complete(struct spdk_nvmf_request *req)
781 : {
782 0 : return req->qpair->transport->ops->req_complete(req);
783 : }
784 :
785 : void
786 0 : nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair,
787 : spdk_nvmf_transport_qpair_fini_cb cb_fn,
788 : void *cb_arg)
789 : {
790 : SPDK_DTRACE_PROBE1(nvmf_transport_qpair_fini, qpair);
791 :
792 0 : qpair->transport->ops->qpair_fini(qpair, cb_fn, cb_arg);
793 0 : }
794 :
795 : int
796 0 : nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
797 : struct spdk_nvme_transport_id *trid)
798 : {
799 0 : return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
800 : }
801 :
802 : int
803 0 : nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
804 : struct spdk_nvme_transport_id *trid)
805 : {
806 0 : return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
807 : }
808 :
809 : int
810 0 : nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
811 : struct spdk_nvme_transport_id *trid)
812 : {
813 0 : return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
814 : }
815 :
816 : void
817 0 : nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
818 : struct spdk_nvmf_request *req)
819 : {
820 0 : if (qpair->transport->ops->qpair_abort_request) {
821 0 : qpair->transport->ops->qpair_abort_request(qpair, req);
822 : }
823 0 : }
824 :
825 : bool
826 4 : spdk_nvmf_transport_opts_init(const char *transport_name,
827 : struct spdk_nvmf_transport_opts *opts, size_t opts_size)
828 : {
829 : const struct spdk_nvmf_transport_ops *ops;
830 4 : struct spdk_nvmf_transport_opts opts_local = {};
831 :
832 4 : ops = nvmf_get_transport_ops(transport_name);
833 4 : if (!ops) {
834 1 : SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
835 1 : return false;
836 : }
837 :
838 3 : if (!opts) {
839 1 : SPDK_ERRLOG("opts should not be NULL\n");
840 1 : return false;
841 : }
842 :
843 2 : if (!opts_size) {
844 1 : SPDK_ERRLOG("opts_size inside opts should not be zero value\n");
845 1 : return false;
846 : }
847 :
848 1 : opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
849 1 : opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
850 1 : opts_local.disable_command_passthru = false;
851 1 : ops->opts_init(&opts_local);
852 :
853 1 : nvmf_transport_opts_copy(opts, &opts_local, opts_size);
854 :
855 1 : return true;
856 : }
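/*
 * Illustrative usage sketch (not part of the covered source): a typical
 * caller fills in the defaults with spdk_nvmf_transport_opts_init(),
 * optionally overrides individual fields, and then creates the transport.
 * The "TCP" transport name, the override value and the helper name below
 * are examples only and assume a transport with a synchronous create
 * callback; transports that only provide create_async must go through
 * spdk_nvmf_transport_create_async() instead.
 */
#if 0 /* example only */
static struct spdk_nvmf_transport *
example_create_tcp_transport(void)
{
	struct spdk_nvmf_transport_opts opts = {};

	if (!spdk_nvmf_transport_opts_init("TCP", &opts, sizeof(opts))) {
		return NULL;
	}

	/* Override a default before creating the transport. */
	opts.max_queue_depth = 128;

	return spdk_nvmf_transport_create("TCP", &opts);
}
#endif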
857 :
858 : void
859 6 : spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
860 : struct spdk_nvmf_transport_poll_group *group,
861 : struct spdk_nvmf_transport *transport)
862 : {
863 : uint32_t i;
864 :
865 11 : for (i = 0; i < req->iovcnt; i++) {
866 5 : spdk_iobuf_put(group->buf_cache, req->iov[i].iov_base, req->iov[i].iov_len);
867 5 : req->iov[i].iov_base = NULL;
868 5 : req->iov[i].iov_len = 0;
869 : }
870 6 : req->iovcnt = 0;
871 6 : req->data_from_pool = false;
872 6 : }
873 :
874 : static int
875 74 : nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
876 : uint32_t io_unit_size)
877 : {
878 74 : req->iov[req->iovcnt].iov_base = buf;
879 74 : req->iov[req->iovcnt].iov_len = spdk_min(length, io_unit_size);
880 74 : length -= req->iov[req->iovcnt].iov_len;
881 74 : req->iovcnt++;
882 :
883 74 : return length;
884 : }
885 :
886 : static int
887 6 : nvmf_request_set_stripped_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
888 : uint32_t io_unit_size)
889 : {
890 6 : struct spdk_nvmf_stripped_data *data = req->stripped_data;
891 :
892 6 : data->iov[data->iovcnt].iov_base = buf;
893 6 : data->iov[data->iovcnt].iov_len = spdk_min(length, io_unit_size);
894 6 : length -= data->iov[data->iovcnt].iov_len;
895 6 : data->iovcnt++;
896 :
897 6 : return length;
898 : }
899 :
900 : static void nvmf_request_iobuf_get_cb(struct spdk_iobuf_entry *entry, void *buf);
901 :
902 : static int
903 25 : nvmf_request_get_buffers(struct spdk_nvmf_request *req,
904 : struct spdk_nvmf_transport_poll_group *group,
905 : struct spdk_nvmf_transport *transport,
906 : uint32_t length, uint32_t io_unit_size,
907 : bool stripped_buffers)
908 : {
909 25 : struct spdk_iobuf_entry *entry = NULL;
910 : uint32_t num_buffers;
911 25 : uint32_t i = 0;
912 : void *buffer;
913 :
914 : /* If the number of buffers is too large, then we know the I/O is larger than allowed.
915 : * Fail it.
916 : */
917 25 : num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
918 25 : if (spdk_unlikely(num_buffers > NVMF_REQ_MAX_BUFFERS)) {
919 0 : return -EINVAL;
920 : }
921 :
922 : /* Use iobuf queuing only if transport supports it */
923 25 : if (transport->ops->req_get_buffers_done != NULL) {
924 0 : entry = &req->iobuf.entry;
925 : }
926 :
927 105 : while (i < num_buffers) {
928 81 : buffer = spdk_iobuf_get(group->buf_cache, spdk_min(io_unit_size, length), entry,
929 : nvmf_request_iobuf_get_cb);
930 81 : if (spdk_unlikely(buffer == NULL)) {
931 1 : req->iobuf.remaining_length = length;
932 1 : return -ENOMEM;
933 : }
934 80 : if (stripped_buffers) {
935 6 : length = nvmf_request_set_stripped_buffer(req, buffer, length, io_unit_size);
936 : } else {
937 74 : length = nvmf_request_set_buffer(req, buffer, length, io_unit_size);
938 : }
939 80 : i++;
940 : }
941 :
942 24 : assert(length == 0);
943 24 : req->data_from_pool = true;
944 :
945 24 : return 0;
946 : }
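/*
 * Worked example (illustrative numbers, not from the covered source): a
 * 100 KiB request with an io_unit_size of 8 KiB needs
 * SPDK_CEIL_DIV(102400, 8192) = 13 buffers; the first 12 iovecs carry
 * 8 KiB each and the last one carries the remaining 4 KiB.
 */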
947 :
948 : static void
949 0 : nvmf_request_iobuf_get_cb(struct spdk_iobuf_entry *entry, void *buf)
950 : {
951 0 : struct spdk_nvmf_request *req = SPDK_CONTAINEROF(entry, struct spdk_nvmf_request, iobuf.entry);
952 0 : struct spdk_nvmf_transport *transport = req->qpair->transport;
953 0 : struct spdk_nvmf_poll_group *group = req->qpair->group;
954 0 : struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(group, transport);
955 0 : uint32_t length = req->iobuf.remaining_length;
956 0 : uint32_t io_unit_size = transport->opts.io_unit_size;
957 : int rc;
958 :
959 0 : assert(tgroup != NULL);
960 :
961 0 : length = nvmf_request_set_buffer(req, buf, length, io_unit_size);
962 0 : rc = nvmf_request_get_buffers(req, tgroup, transport, length, io_unit_size, false);
963 0 : if (rc == 0) {
964 0 : transport->ops->req_get_buffers_done(req);
965 : }
966 0 : }
967 :
968 : int
969 21 : spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
970 : struct spdk_nvmf_transport_poll_group *group,
971 : struct spdk_nvmf_transport *transport,
972 : uint32_t length)
973 : {
974 : int rc;
975 :
976 21 : assert(nvmf_transport_use_iobuf(transport));
977 :
978 21 : req->iovcnt = 0;
979 21 : rc = nvmf_request_get_buffers(req, group, transport, length, transport->opts.io_unit_size, false);
980 21 : if (spdk_unlikely(rc == -ENOMEM && transport->ops->req_get_buffers_done == NULL)) {
981 1 : spdk_nvmf_request_free_buffers(req, group, transport);
982 : }
983 :
984 21 : return rc;
985 : }
986 :
987 : static int
988 0 : nvmf_request_get_buffers_abort_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
989 : void *cb_ctx)
990 : {
991 0 : struct spdk_nvmf_request *req, *req_to_abort = cb_ctx;
992 :
993 0 : req = SPDK_CONTAINEROF(entry, struct spdk_nvmf_request, iobuf.entry);
994 0 : if (req != req_to_abort) {
995 0 : return 0;
996 : }
997 :
998 0 : spdk_iobuf_entry_abort(ch, entry, spdk_min(req->iobuf.remaining_length,
999 : req->qpair->transport->opts.io_unit_size));
1000 0 : return 1;
1001 : }
1002 :
1003 : bool
1004 0 : nvmf_request_get_buffers_abort(struct spdk_nvmf_request *req)
1005 : {
1006 0 : struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(req->qpair->group,
1007 0 : req->qpair->transport);
1008 : int rc;
1009 :
1010 0 : assert(tgroup != NULL);
1011 :
1012 0 : rc = spdk_iobuf_for_each_entry(tgroup->buf_cache, nvmf_request_get_buffers_abort_cb, req);
1013 0 : return rc == 1;
1014 : }
1015 :
1016 : void
1017 0 : nvmf_request_free_stripped_buffers(struct spdk_nvmf_request *req,
1018 : struct spdk_nvmf_transport_poll_group *group,
1019 : struct spdk_nvmf_transport *transport)
1020 : {
1021 0 : struct spdk_nvmf_stripped_data *data = req->stripped_data;
1022 : uint32_t i;
1023 :
1024 0 : for (i = 0; i < data->iovcnt; i++) {
1025 0 : spdk_iobuf_put(group->buf_cache, data->iov[i].iov_base, data->iov[i].iov_len);
1026 : }
1027 0 : free(data);
1028 0 : req->stripped_data = NULL;
1029 0 : }
1030 :
1031 : int
1032 8 : nvmf_request_get_stripped_buffers(struct spdk_nvmf_request *req,
1033 : struct spdk_nvmf_transport_poll_group *group,
1034 : struct spdk_nvmf_transport *transport,
1035 : uint32_t length)
1036 : {
1037 8 : uint32_t block_size = req->dif.dif_ctx.block_size;
1038 8 : uint32_t data_block_size = block_size - req->dif.dif_ctx.md_size;
1039 8 : uint32_t io_unit_size = transport->opts.io_unit_size / block_size * data_block_size;
1040 : struct spdk_nvmf_stripped_data *data;
1041 : uint32_t i;
1042 : int rc;
1043 :
1044 : /* We don't support iobuf queueing with stripped buffers yet */
1045 8 : assert(transport->ops->req_get_buffers_done == NULL);
1046 :
1047 : /* Data blocks must be block aligned */
1048 14 : for (i = 0; i < req->iovcnt; i++) {
1049 10 : if (req->iov[i].iov_len % block_size) {
1050 4 : return -EINVAL;
1051 : }
1052 : }
1053 :
1054 4 : data = calloc(1, sizeof(*data));
1055 4 : if (data == NULL) {
1056 0 : SPDK_ERRLOG("Unable to allocate memory for stripped_data.\n");
1057 0 : return -ENOMEM;
1058 : }
1059 4 : req->stripped_data = data;
1060 4 : req->stripped_data->iovcnt = 0;
1061 :
1062 4 : rc = nvmf_request_get_buffers(req, group, transport, length, io_unit_size, true);
1063 4 : if (rc == -ENOMEM) {
1064 0 : nvmf_request_free_stripped_buffers(req, group, transport);
1065 0 : return rc;
1066 : }
1067 4 : return rc;
1068 : }
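/*
 * Worked example (illustrative numbers, not from the covered source): with a
 * 520-byte protected block (512 bytes of data + 8 bytes of metadata) and a
 * transport io_unit_size of 8192, the stripped io_unit_size becomes
 * 8192 / 520 * 512 = 7680 bytes, i.e. each stripped buffer holds the data
 * portion of the 15 protected blocks that fit in one transport I/O unit.
 */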