Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2020 Intel Corporation.
3 : * Copyright (c) 2021 Mellanox Technologies LTD.
4 : * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
5 : * All rights reserved.
6 : */
7 :
8 : #include "nvme_internal.h"
9 :
10 1 : SPDK_LOG_DEPRECATION_REGISTER(nvme_accel_fn_submit_crc,
11 : "spdk_nvme_accel_fn_table.submit_accel_crc32c", "v25.01", 0);
12 :
13 : struct spdk_nvme_poll_group *
14 8 : spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
15 : {
16 : struct spdk_nvme_poll_group *group;
17 :
18 8 : group = calloc(1, sizeof(*group));
19 8 : if (group == NULL) {
20 1 : return NULL;
21 : }
22 :
23 7 : group->accel_fn_table.table_size = sizeof(struct spdk_nvme_accel_fn_table);
24 7 : if (table && table->table_size != 0) {
25 0 : group->accel_fn_table.table_size = table->table_size;
26 : #define SET_FIELD(field) \
27 : if (offsetof(struct spdk_nvme_accel_fn_table, field) + sizeof(table->field) <= table->table_size) { \
28 : group->accel_fn_table.field = table->field; \
29 : } \
30 :
31 0 : SET_FIELD(submit_accel_crc32c);
32 0 : SET_FIELD(append_crc32c);
33 0 : SET_FIELD(append_copy);
34 0 : SET_FIELD(finish_sequence);
35 0 : SET_FIELD(reverse_sequence);
36 0 : SET_FIELD(abort_sequence);
37 : /* Do not remove this assert. Always update it when adding a new field,
38 : * and remember to add a matching SET_FIELD statement for the new field. */
39 : SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_accel_fn_table) == 56, "Incorrect size");
40 :
41 : #undef SET_FIELD
42 : }
43 :
44 : /* Make sure either all or none of the sequence manipulation callbacks are implemented */
45 7 : if ((group->accel_fn_table.finish_sequence && group->accel_fn_table.reverse_sequence &&
46 7 : group->accel_fn_table.abort_sequence) !=
47 14 : (group->accel_fn_table.finish_sequence || group->accel_fn_table.reverse_sequence ||
48 7 : group->accel_fn_table.abort_sequence)) {
49 0 : SPDK_ERRLOG("Invalid accel_fn_table configuration: either all or none of the "
50 : "sequence callbacks must be provided\n");
51 0 : free(group);
52 0 : return NULL;
53 : }
54 :
55 : /* Make sure that sequence callbacks are implemented if append* callbacks are provided */
56 7 : if ((group->accel_fn_table.append_crc32c || group->accel_fn_table.append_copy) &&
57 0 : !group->accel_fn_table.finish_sequence) {
58 0 : SPDK_ERRLOG("Invalid accel_fn_table configuration: append_crc32c and/or append_copy require sequence "
59 : "callbacks to be provided\n");
60 0 : free(group);
61 0 : return NULL;
62 : }
63 :
64 7 : if (group->accel_fn_table.submit_accel_crc32c != NULL) {
65 0 : SPDK_LOG_DEPRECATED(nvme_accel_fn_submit_crc);
66 : }
67 :
68 7 : group->ctx = ctx;
69 7 : STAILQ_INIT(&group->tgroups);
70 :
71 7 : return group;
72 : }
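/*
 * Illustrative usage sketch (not part of the covered file above): creating a
 * poll group.  The accel_fn_table argument is optional; when one is passed,
 * only the callbacks that fit inside the caller-reported table_size are
 * copied by the SET_FIELD() logic above, which is what keeps callers built
 * against a smaller struct layout working.  The includes below are assumed
 * by the later sketches as well.
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static struct spdk_nvme_poll_group *
example_create_poll_group(void *app_ctx)
{
	struct spdk_nvme_accel_fn_table table = { 0 };

	/* Report the size this caller was compiled against; callbacks left
	 * NULL are simply never invoked by the poll group. */
	table.table_size = sizeof(table);

	return spdk_nvme_poll_group_create(app_ctx, &table);
}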
73 :
74 : struct spdk_nvme_poll_group *
75 0 : spdk_nvme_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
76 : {
77 : struct spdk_nvme_transport_poll_group *tgroup;
78 :
79 0 : tgroup = nvme_transport_qpair_get_optimal_poll_group(qpair->transport, qpair);
80 :
81 0 : if (tgroup == NULL) {
82 0 : return NULL;
83 : }
84 :
85 0 : return tgroup->group;
86 : }
87 :
88 : int
89 10 : spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
90 : {
91 : struct spdk_nvme_transport_poll_group *tgroup;
92 : const struct spdk_nvme_transport *transport;
93 :
94 10 : if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
95 1 : return -EINVAL;
96 : }
97 :
98 19 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
99 12 : if (tgroup->transport == qpair->transport) {
100 2 : break;
101 : }
102 : }
103 :
104 : /* See if a new transport has been added (dlopen style) and we need to update the poll group */
105 9 : if (!tgroup) {
106 7 : transport = nvme_get_first_transport();
107 17 : while (transport != NULL) {
108 15 : if (transport == qpair->transport) {
109 5 : tgroup = nvme_transport_poll_group_create(transport);
110 5 : if (tgroup == NULL) {
111 0 : return -ENOMEM;
112 : }
113 5 : tgroup->group = group;
114 5 : STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
115 5 : break;
116 : }
117 10 : transport = nvme_get_next_transport(transport);
118 : }
119 : }
120 :
121 9 : return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
122 : }
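/*
 * Illustrative sketch: spdk_nvme_poll_group_add() only accepts qpairs that
 * are still in the disconnected state, so the usual flow is to allocate the
 * I/O qpair with create_only set, add it to the group, and only then connect
 * it.  The controller handle is assumed to have been attached elsewhere, and
 * error unwinding is kept minimal for brevity.
 */
static struct spdk_nvme_qpair *
example_add_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	if (qpair == NULL) {
		return NULL;
	}

	if (spdk_nvme_poll_group_add(group, qpair) != 0) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		return NULL;
	}

	/* Completions for this qpair are now reaped through the poll group. */
	if (spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		return NULL;
	}

	return qpair;
}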
123 :
124 : int
125 7 : spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
126 : {
127 : struct spdk_nvme_transport_poll_group *tgroup;
128 :
129 15 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
130 14 : if (tgroup->transport == qpair->transport) {
131 6 : return nvme_transport_poll_group_remove(tgroup, qpair);
132 : }
133 : }
134 :
135 1 : return -ENODEV;
136 : }
137 :
138 : int
139 1 : nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
140 : {
141 1 : return nvme_transport_poll_group_connect_qpair(qpair);
142 : }
143 :
144 : int
145 0 : nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
146 : {
147 0 : return nvme_transport_poll_group_disconnect_qpair(qpair);
148 : }
149 :
150 : int64_t
151 2 : spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
152 : uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
153 : {
154 : struct spdk_nvme_transport_poll_group *tgroup;
155 2 : int64_t local_completions = 0, error_reason = 0, num_completions = 0;
156 :
157 2 : if (disconnected_qpair_cb == NULL) {
158 0 : return -EINVAL;
159 : }
160 :
161 2 : if (spdk_unlikely(group->in_process_completions)) {
162 0 : return 0;
163 : }
164 2 : group->in_process_completions = true;
165 :
166 3 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
167 1 : local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
168 : disconnected_qpair_cb);
169 1 : if (local_completions < 0 && error_reason == 0) {
170 0 : error_reason = local_completions;
171 : } else {
172 1 : num_completions += local_completions;
173 : /* Just to be safe */
174 1 : assert(num_completions >= 0);
175 : }
176 : }
177 2 : group->in_process_completions = false;
178 :
179 2 : return error_reason ? error_reason : num_completions;
180 : }
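/*
 * Illustrative sketch of a poller built around
 * spdk_nvme_poll_group_process_completions().  The disconnected-qpair
 * callback is mandatory (the call returns -EINVAL without one); passing 0 as
 * completions_per_qpair lets each transport use its default batch size.
 */
static void
example_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	/* A real application would tear down or start reconnecting the qpair. */
	(void)qpair;
	(void)poll_group_ctx;
}

static int64_t
example_poll(struct spdk_nvme_poll_group *group)
{
	int64_t rc;

	rc = spdk_nvme_poll_group_process_completions(group, 0,
			example_disconnected_qpair_cb);
	if (rc < 0) {
		/* First error reported by any transport group; other transport
		 * groups may still have processed completions. */
	}

	return rc;
}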
181 :
182 : int
183 0 : spdk_nvme_poll_group_all_connected(struct spdk_nvme_poll_group *group)
184 : {
185 : struct spdk_nvme_transport_poll_group *tgroup;
186 : struct spdk_nvme_qpair *qpair;
187 0 : int rc = 0;
188 :
189 0 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
190 0 : if (!STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
191 : /* Treat disconnected qpairs as highest priority for notification.
192 : * This means we can just return immediately here.
193 : */
194 0 : return -EIO;
195 : }
196 0 : STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
197 0 : if (nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTING) {
198 0 : return -EIO;
199 0 : } else if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
200 0 : rc = -EAGAIN;
201 : /* Break so that we can check the remaining transport groups,
202 : * in case any of them have a disconnected qpair.
203 : */
204 0 : break;
205 : }
206 : }
207 : }
208 :
209 0 : return rc;
210 : }
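/*
 * Illustrative sketch: when qpairs are connected asynchronously, the caller
 * can keep polling the group until spdk_nvme_poll_group_all_connected() stops
 * returning -EAGAIN.  0 means every qpair is connected, -EIO means at least
 * one qpair is disconnected or failed.  Reuses the callback from the polling
 * sketch above.
 */
static int
example_wait_all_connected(struct spdk_nvme_poll_group *group)
{
	int rc;

	do {
		spdk_nvme_poll_group_process_completions(group, 0,
				example_disconnected_qpair_cb);
		rc = spdk_nvme_poll_group_all_connected(group);
	} while (rc == -EAGAIN);

	return rc;
}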
211 :
212 : void *
213 0 : spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
214 : {
215 0 : return group->ctx;
216 : }
217 :
218 : int
219 8 : spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
220 : {
221 : struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
222 :
223 9 : STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
224 2 : STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
225 2 : if (nvme_transport_poll_group_destroy(tgroup) != 0) {
226 1 : STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
227 1 : return -EBUSY;
228 : }
229 :
230 : }
231 :
232 7 : free(group);
233 :
234 7 : return 0;
235 : }
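/*
 * Illustrative teardown sketch: spdk_nvme_poll_group_destroy() returns -EBUSY
 * while any transport group still holds qpairs, so qpairs are disconnected
 * and removed first.  Whether the qpair is freed here or elsewhere is an
 * application choice.
 */
static int
example_teardown(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	/* A qpair must be disconnected before it can be removed from its group. */
	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
	spdk_nvme_poll_group_remove(group, qpair);
	spdk_nvme_ctrlr_free_io_qpair(qpair);

	/* Destroy fails with -EBUSY if some other qpair is still attached. */
	return spdk_nvme_poll_group_destroy(group);
}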
236 :
237 : int
238 2 : spdk_nvme_poll_group_get_stats(struct spdk_nvme_poll_group *group,
239 : struct spdk_nvme_poll_group_stat **stats)
240 : {
241 : struct spdk_nvme_transport_poll_group *tgroup;
242 : struct spdk_nvme_poll_group_stat *result;
243 2 : uint32_t transports_count = 0;
244 : /* Not all transports used by this poll group may support statistics reporting */
245 2 : uint32_t reported_stats_count = 0;
246 : int rc;
247 :
248 2 : assert(group);
249 2 : assert(stats);
250 :
251 2 : result = calloc(1, sizeof(*result));
252 2 : if (!result) {
253 0 : SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
254 0 : return -ENOMEM;
255 : }
256 :
257 5 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
258 3 : transports_count++;
259 : }
260 :
261 2 : result->transport_stat = calloc(transports_count, sizeof(*result->transport_stat));
262 2 : if (!result->transport_stat) {
263 0 : SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
264 0 : free(result);
265 0 : return -ENOMEM;
266 : }
267 :
268 5 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
269 3 : rc = nvme_transport_poll_group_get_stats(tgroup, &result->transport_stat[reported_stats_count]);
270 3 : if (rc == 0) {
271 3 : reported_stats_count++;
272 : }
273 : }
274 :
275 2 : if (reported_stats_count == 0) {
276 1 : free(result->transport_stat);
277 1 : free(result);
278 1 : SPDK_DEBUGLOG(nvme, "No transport statistics available\n");
279 1 : return -ENOTSUP;
280 : }
281 :
282 1 : result->num_transports = reported_stats_count;
283 1 : *stats = result;
284 :
285 1 : return 0;
286 : }
287 :
288 : void
289 1 : spdk_nvme_poll_group_free_stats(struct spdk_nvme_poll_group *group,
290 : struct spdk_nvme_poll_group_stat *stat)
291 : {
292 : struct spdk_nvme_transport_poll_group *tgroup;
293 : uint32_t i;
294 1 : uint32_t freed_stats __attribute__((unused)) = 0;
295 :
296 1 : assert(group);
297 1 : assert(stat);
298 :
299 4 : for (i = 0; i < stat->num_transports; i++) {
300 3 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
301 3 : if (nvme_transport_get_trtype(tgroup->transport) == stat->transport_stat[i]->trtype) {
302 3 : nvme_transport_poll_group_free_stats(tgroup, stat->transport_stat[i]);
303 3 : freed_stats++;
304 3 : break;
305 : }
306 : }
307 : }
308 :
309 1 : assert(freed_stats == stat->num_transports);
310 :
311 1 : free(stat->transport_stat);
312 1 : free(stat);
313 1 : }
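/*
 * Illustrative sketch for the statistics pair above:
 * spdk_nvme_poll_group_get_stats() allocates the result, which must be
 * released with spdk_nvme_poll_group_free_stats() on the same group.
 * -ENOTSUP simply means no transport in the group reports statistics.
 */
static void
example_dump_stats(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_poll_group_stat *stats = NULL;
	uint32_t i;

	if (spdk_nvme_poll_group_get_stats(group, &stats) != 0) {
		return;
	}

	for (i = 0; i < stats->num_transports; i++) {
		printf("transport %s reported statistics\n",
		       spdk_nvme_transport_id_trtype_str(stats->transport_stat[i]->trtype));
	}

	spdk_nvme_poll_group_free_stats(group, stats);
}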