/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2021 Mellanox Technologies LTD.
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "nvme_internal.h"

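/* Create a poll group that can drive completions for qpairs from multiple
 * controllers and transports in one place. An optional accel function table
 * may be supplied to offload operations such as CRC32C to an acceleration
 * framework. A typical lifecycle, as an illustrative sketch (app_ctx, ctrlr,
 * opts, running and my_disconnect_cb are hypothetical caller-side names):
 *
 *	group = spdk_nvme_poll_group_create(app_ctx, NULL);
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *	opts.create_only = true;
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 *	spdk_nvme_poll_group_add(group, qpair);
 *	spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
 *	while (running) {
 *		spdk_nvme_poll_group_process_completions(group, 0, my_disconnect_cb);
 *	}
 *	spdk_nvme_poll_group_destroy(group);
 */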
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;
	int rc;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->accel_fn_table.table_size = sizeof(struct spdk_nvme_accel_fn_table);
	if (table && table->table_size != 0) {
		group->accel_fn_table.table_size = table->table_size;
#define SET_FIELD(field) \
		if (offsetof(struct spdk_nvme_accel_fn_table, field) + sizeof(table->field) <= table->table_size) { \
			group->accel_fn_table.field = table->field; \
		} \

		SET_FIELD(append_crc32c);
		SET_FIELD(append_copy);
		SET_FIELD(finish_sequence);
		SET_FIELD(reverse_sequence);
		SET_FIELD(abort_sequence);
		/* Do not remove this assert; whenever a new field is added, update the expected
		 * size here and remember to add a SET_FIELD() line for the new field above. */
		SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_accel_fn_table) == 56, "Incorrect size");

#undef SET_FIELD
	}

	/* Make sure either all or none of the sequence manipulation callbacks are implemented */
	if ((group->accel_fn_table.finish_sequence && group->accel_fn_table.reverse_sequence &&
	     group->accel_fn_table.abort_sequence) !=
	    (group->accel_fn_table.finish_sequence || group->accel_fn_table.reverse_sequence ||
	     group->accel_fn_table.abort_sequence)) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: either all or none of the "
			    "sequence callbacks must be provided\n");
		free(group);
		return NULL;
	}

	/* Make sure that sequence callbacks are implemented if append* callbacks are provided */
	if ((group->accel_fn_table.append_crc32c || group->accel_fn_table.append_copy) &&
	    !group->accel_fn_table.finish_sequence) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: append_crc32c and/or append_copy require sequence "
			    "callbacks to be provided\n");
		free(group);
		return NULL;
	}

	/* If interrupts are enabled, this fd_group will be used to manage events triggered on the
	 * file descriptors of all the qpairs in this poll group */
	rc = spdk_fd_group_create(&group->fgrp);
	if (rc) {
		/* Ignore this for non-Linux platforms, as fd_groups aren't supported there. */
#if defined(__linux__)
		SPDK_ERRLOG("Cannot create fd group for the nvme poll group\n");
		free(group);
		return NULL;
#endif
	}

	group->disconnect_qpair_fd = -1;
	group->ctx = ctx;
	STAILQ_INIT(&group->tgroups);

	return group;
}

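/* Return a single pollable fd for the whole group, backed by the group's
 * fd_group, suitable for registering with an external event loop. */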
int
spdk_nvme_poll_group_get_fd(struct spdk_nvme_poll_group *group)
{
	if (!group->fgrp) {
		SPDK_ERRLOG("No fd group present for the nvme poll group.\n");
		assert(false);
		return -EINVAL;
	}

	return spdk_fd_group_get_fd(group->fgrp);
}

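/* Return the poll group's underlying fd_group, or NULL if none exists. */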
struct spdk_fd_group *
spdk_nvme_poll_group_get_fd_group(struct spdk_nvme_poll_group *group)
{
	return group->fgrp;
}

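/* Register a callback that is invoked when a disconnect event is raised on the
 * group's eventfd (see nvme_poll_group_read_disconnect_qpair_fd()). Passing
 * NULL unregisters the callback; only one callback may be set at a time. */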
int
spdk_nvme_poll_group_set_interrupt_callback(struct spdk_nvme_poll_group *group,
		spdk_nvme_poll_group_interrupt_cb cb_fn, void *cb_ctx)
{
	if (group->interrupt.cb_fn != NULL && cb_fn != NULL) {
		return -EEXIST;
	}

	group->interrupt.cb_fn = cb_fn;
	group->interrupt.cb_ctx = cb_ctx;

	return 0;
}

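/* Ask the qpair's transport which poll group it considers optimal for this
 * qpair, or return NULL if the transport expresses no preference. */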
struct spdk_nvme_poll_group *
spdk_nvme_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;

	tgroup = nvme_transport_qpair_get_optimal_poll_group(qpair->transport, qpair);

	if (tgroup == NULL) {
		return NULL;
	}

	return tgroup->group;
}

#ifdef __linux__
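/* fd_group handler for the disconnect eventfd: forwards the wakeup to the
 * user's interrupt callback, if one is registered. */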
static int
nvme_poll_group_read_disconnect_qpair_fd(void *arg)
{
	struct spdk_nvme_poll_group *group = arg;

	if (group->interrupt.cb_fn != NULL) {
		group->interrupt.cb_fn(group, group->interrupt.cb_ctx);
	}

	return 0;
}

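/* Signal the disconnect eventfd so that a waiter in spdk_nvme_poll_group_wait()
 * (or the transport's completion path) wakes up and checks for disconnected
 * qpairs. No-op when interrupts are disabled. */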
void
nvme_poll_group_write_disconnect_qpair_fd(struct spdk_nvme_poll_group *group)
{
	uint64_t notify = 1;
	int rc;

	if (!group->enable_interrupts) {
		return;
	}

	/* Write to the disconnect qpair fd. This will generate an event on the epoll fd of the
	 * poll group. We then check for disconnected qpairs either in spdk_nvme_poll_group_wait()
	 * or in the transport's poll_group_process_completions() callback.
	 */
	rc = write(group->disconnect_qpair_fd, &notify, sizeof(notify));
	if (rc < 0) {
		SPDK_ERRLOG("failed to write the disconnect qpair fd: %s.\n", strerror(errno));
	}
}

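/* Create the eventfd used to signal qpair disconnects and register it with the
 * poll group's fd_group. */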
static int
nvme_poll_group_add_disconnect_qpair_fd(struct spdk_nvme_poll_group *group)
{
	struct spdk_event_handler_opts opts = {};
	int fd;

	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fd < 0) {
		return fd;
	}

	assert(group->disconnect_qpair_fd == -1);
	group->disconnect_qpair_fd = fd;

	spdk_fd_group_get_default_event_handler_opts(&opts, sizeof(opts));
	opts.fd_type = SPDK_FD_TYPE_EVENTFD;

	return SPDK_FD_GROUP_ADD_EXT(group->fgrp, fd, nvme_poll_group_read_disconnect_qpair_fd,
				     group, &opts);
}

#else

void
nvme_poll_group_write_disconnect_qpair_fd(struct spdk_nvme_poll_group *group)
{
}

static int
nvme_poll_group_add_disconnect_qpair_fd(struct spdk_nvme_poll_group *group)
{
	return -ENOTSUP;
}

#endif

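/* Add a disconnected qpair to the poll group. The first qpair added determines
 * whether the whole group runs in interrupt or polled mode; later qpairs with a
 * mismatched interrupt setting are rejected with -EINVAL. */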
int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	const struct spdk_nvme_transport *transport;
	int rc;

	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
		return -EINVAL;
	}

	if (!group->enable_interrupts_is_valid) {
		group->enable_interrupts_is_valid = true;
		group->enable_interrupts = qpair->ctrlr->opts.enable_interrupts;
		if (group->enable_interrupts) {
			rc = nvme_poll_group_add_disconnect_qpair_fd(group);
			if (rc != 0) {
				return rc;
			}
		}
	} else if (qpair->ctrlr->opts.enable_interrupts != group->enable_interrupts) {
		SPDK_ERRLOG("Queue pair %s interrupts cannot be added to a poll group %s interrupts\n",
			    qpair->ctrlr->opts.enable_interrupts ? "with" : "without",
			    group->enable_interrupts ? "with" : "without");
		return -EINVAL;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			break;
		}
	}

	/* See if a new transport has been added (dlopen style) and we need to update the poll group */
	if (!tgroup) {
		transport = nvme_get_first_transport();
		while (transport != NULL) {
			if (transport == qpair->transport) {
				tgroup = nvme_transport_poll_group_create(transport);
				if (tgroup == NULL) {
					return -ENOMEM;
				}
				tgroup->group = group;
				STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
				break;
			}
			transport = nvme_get_next_transport(transport);
		}
	}

	return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
}

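/* Remove a qpair from the poll group by delegating to the transport poll group
 * that matches the qpair's transport. */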
int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			return nvme_transport_poll_group_remove(tgroup, qpair);
		}
	}

	return -ENODEV;
}

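/* fd_group handler that drains completions from a single qpair when its fd
 * becomes readable. */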
static int
nvme_qpair_process_completion_wrapper(void *arg)
{
	struct spdk_nvme_qpair *qpair = arg;

	return spdk_nvme_qpair_process_completions(qpair, 0);
}

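/* In interrupt mode, register the qpair's fd with the poll group's fd_group so
 * completions are processed when the fd fires; no-op in polled mode. */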
static int
nvme_poll_group_add_qpair_fd(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group;
	struct spdk_event_handler_opts opts = {
		.opts_size = SPDK_SIZEOF(&opts, fd_type),
	};
	int fd;

	group = qpair->poll_group->group;
	if (group->enable_interrupts == false) {
		return 0;
	}

	fd = spdk_nvme_qpair_get_fd(qpair, &opts);
	if (fd < 0) {
		SPDK_ERRLOG("Cannot get fd for the qpair: %d\n", fd);
		return -EINVAL;
	}

	return SPDK_FD_GROUP_ADD_EXT(group->fgrp, fd, nvme_qpair_process_completion_wrapper,
				     qpair, &opts);
}

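/* Counterpart of nvme_poll_group_add_qpair_fd(): unregister the qpair's fd
 * from the fd_group; no-op in polled mode. */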
static void
nvme_poll_group_remove_qpair_fd(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group;
	int fd;

	group = qpair->poll_group->group;
	if (group->enable_interrupts == false) {
		return;
	}

	fd = spdk_nvme_qpair_get_fd(qpair, NULL);
	if (fd < 0) {
		SPDK_ERRLOG("Cannot get fd for the qpair: %d\n", fd);
		assert(false);
		return;
	}

	spdk_fd_group_remove(group->fgrp, fd);
}

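/* Connect a qpair that belongs to a poll group, then hook up its fd. The
 * connect is rolled back if the fd registration fails. */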
int
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = nvme_transport_poll_group_connect_qpair(qpair);
	if (rc != 0) {
		return rc;
	}

	rc = nvme_poll_group_add_qpair_fd(qpair);
	if (rc != 0) {
		nvme_transport_poll_group_disconnect_qpair(qpair);
		return rc;
	}

	return 0;
}

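/* Disconnect a qpair that belongs to a poll group, unhooking its fd first. */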
int
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	nvme_poll_group_remove_qpair_fd(qpair);

	return nvme_transport_poll_group_disconnect_qpair(qpair);
}

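/* Report any disconnected qpairs via disconnected_qpair_cb, then block on the
 * group's fd_group until an event fires. Primarily useful when the group runs
 * in interrupt mode. */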
int
spdk_nvme_poll_group_wait(struct spdk_nvme_poll_group *group,
			  spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int num_events, timeout = -1;

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		nvme_transport_poll_group_check_disconnected_qpairs(tgroup, disconnected_qpair_cb);
	}

	num_events = spdk_fd_group_wait(group->fgrp, timeout);

	return num_events;
}

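/* Poll every transport poll group in this group once. Returns the total number
 * of completions processed, or the first negative error encountered; reentrant
 * calls are detected and return 0 immediately. */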
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	if (spdk_unlikely(group->in_process_completions)) {
		return 0;
	}
	group->in_process_completions = true;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
				    disconnected_qpair_cb);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			/* Just to be safe */
			assert(num_completions >= 0);
		}
	}
	group->in_process_completions = false;

	return error_reason ? error_reason : num_completions;
}

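/* Check connection state across the group: returns 0 when all qpairs are
 * connected, -EAGAIN while any qpair is still connecting, and -EIO if any
 * qpair has been disconnected. */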
int
spdk_nvme_poll_group_all_connected(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	struct spdk_nvme_qpair *qpair;
	int rc = 0;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (!STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
			/* Treat disconnected qpairs as highest priority for notification.
			 * This means we can just return immediately here.
			 */
			return -EIO;
		}
		STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
			if (nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTING) {
				return -EIO;
			} else if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
				rc = -EAGAIN;
				/* Break so that we can check the remaining transport groups,
				 * in case any of them have a disconnected qpair.
				 */
				break;
			}
		}
	}

	return rc;
}

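/* Return the user context that was supplied to spdk_nvme_poll_group_create(). */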
void *
spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
{
	return group->ctx;
}

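/* Destroy the poll group and its transport poll groups. Returns -EBUSY without
 * freeing the group if a transport poll group still has qpairs attached. */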
int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
	struct spdk_fd_group *fgrp = group->fgrp;

	STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
		STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
		if (nvme_transport_poll_group_destroy(tgroup) != 0) {
			STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
			return -EBUSY;
		}
	}

	if (fgrp) {
		if (group->enable_interrupts) {
			spdk_fd_group_remove(fgrp, group->disconnect_qpair_fd);
			close(group->disconnect_qpair_fd);
		}
		spdk_fd_group_destroy(fgrp);
	}

	free(group);

	return 0;
}

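/* Collect per-transport statistics for the poll group. On success the result
 * must be released with spdk_nvme_poll_group_free_stats(). Returns -ENOTSUP if
 * no transport in the group reports statistics. */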
int
spdk_nvme_poll_group_get_stats(struct spdk_nvme_poll_group *group,
			       struct spdk_nvme_poll_group_stat **stats)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	struct spdk_nvme_poll_group_stat *result;
	uint32_t transports_count = 0;
	/* Not all transports used by this poll group may support statistics reporting */
	uint32_t reported_stats_count = 0;
	int rc;

	assert(group);
	assert(stats);

	result = calloc(1, sizeof(*result));
	if (!result) {
		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
		return -ENOMEM;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		transports_count++;
	}

	result->transport_stat = calloc(transports_count, sizeof(*result->transport_stat));
	if (!result->transport_stat) {
		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
		free(result);
		return -ENOMEM;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = nvme_transport_poll_group_get_stats(tgroup, &result->transport_stat[reported_stats_count]);
		if (rc == 0) {
			reported_stats_count++;
		}
	}

	if (reported_stats_count == 0) {
		free(result->transport_stat);
		free(result);
		SPDK_DEBUGLOG(nvme, "No transport statistics available\n");
		return -ENOTSUP;
	}

	result->num_transports = reported_stats_count;
	*stats = result;

	return 0;
}

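/* Free statistics previously returned by spdk_nvme_poll_group_get_stats(),
 * handing each transport's stats back to the transport that produced them. */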
void
spdk_nvme_poll_group_free_stats(struct spdk_nvme_poll_group *group,
				struct spdk_nvme_poll_group_stat *stat)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	uint32_t i;
	uint32_t freed_stats __attribute__((unused)) = 0;

	assert(group);
	assert(stat);

	for (i = 0; i < stat->num_transports; i++) {
		STAILQ_FOREACH(tgroup, &group->tgroups, link) {
			if (nvme_transport_get_trtype(tgroup->transport) == stat->transport_stat[i]->trtype) {
				nvme_transport_poll_group_free_stats(tgroup, stat->transport_stat[i]);
				freed_stats++;
				break;
			}
		}
	}

	assert(freed_stats == stat->num_transports);

	free(stat->transport_stat);
	free(stat);
}