Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2020 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5 : */
6 :
7 : #include "nvme_internal.h"
8 :
/**
 * Create a new NVMe poll group.
 *
 * \param ctx Opaque caller context stored on the group (retrievable via
 * spdk_nvme_poll_group_get_ctx()).
 * \param table Optional accel function table. May be NULL. If provided, only
 * the fields that fit within the caller-reported table->table_size are copied,
 * which keeps this ABI-compatible with callers built against older, smaller
 * versions of the struct.
 * \return the new group, or NULL on allocation failure or an inconsistent
 * accel table.
 */
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	/* Default to the full current table size; overridden below if the
	 * caller supplied their own (possibly smaller) size. */
	group->accel_fn_table.table_size = sizeof(struct spdk_nvme_accel_fn_table);
	if (table && table->table_size != 0) {
		group->accel_fn_table.table_size = table->table_size;
		/* Copy a field only if it lies entirely within the caller's
		 * declared table size — guards against reading past the end of
		 * an older caller's shorter struct. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_accel_fn_table, field) + sizeof(table->field) <= table->table_size) { \
		group->accel_fn_table.field = table->field; \
	} \

		SET_FIELD(submit_accel_crc32c);
		SET_FIELD(append_crc32c);
		SET_FIELD(finish_sequence);
		SET_FIELD(reverse_sequence);
		SET_FIELD(abort_sequence);
		/* Do not remove this statement, you should always update this statement when you adding a new field,
		 * and do not forget to add the SET_FIELD statement for your added field. */
		SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_accel_fn_table) == 48, "Incorrect size");

#undef SET_FIELD
	}

	/* Make sure either all or none of the sequence manipulation callbacks are implemented */
	if ((group->accel_fn_table.finish_sequence && group->accel_fn_table.reverse_sequence &&
	     group->accel_fn_table.abort_sequence) !=
	    (group->accel_fn_table.finish_sequence || group->accel_fn_table.reverse_sequence ||
	     group->accel_fn_table.abort_sequence)) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: either all or none of the "
			    "sequence callbacks must be provided\n");
		free(group);
		return NULL;
	}

	/* Make sure that sequence callbacks are implemented if append* callbacks are provided */
	if (group->accel_fn_table.append_crc32c && !group->accel_fn_table.finish_sequence) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: append_crc32c requires sequence "
			    "callbacks to be provided\n");
		free(group);
		return NULL;
	}

	group->ctx = ctx;
	STAILQ_INIT(&group->tgroups);

	return group;
}
63 :
64 : struct spdk_nvme_poll_group *
65 0 : spdk_nvme_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
66 : {
67 : struct spdk_nvme_transport_poll_group *tgroup;
68 :
69 0 : tgroup = nvme_transport_qpair_get_optimal_poll_group(qpair->transport, qpair);
70 :
71 0 : if (tgroup == NULL) {
72 0 : return NULL;
73 : }
74 :
75 0 : return tgroup->group;
76 : }
77 :
78 : int
79 10 : spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
80 : {
81 : struct spdk_nvme_transport_poll_group *tgroup;
82 : const struct spdk_nvme_transport *transport;
83 :
84 10 : if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
85 1 : return -EINVAL;
86 : }
87 :
88 19 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
89 12 : if (tgroup->transport == qpair->transport) {
90 2 : break;
91 : }
92 : }
93 :
94 : /* See if a new transport has been added (dlopen style) and we need to update the poll group */
95 9 : if (!tgroup) {
96 7 : transport = nvme_get_first_transport();
97 17 : while (transport != NULL) {
98 15 : if (transport == qpair->transport) {
99 5 : tgroup = nvme_transport_poll_group_create(transport);
100 5 : if (tgroup == NULL) {
101 0 : return -ENOMEM;
102 : }
103 5 : tgroup->group = group;
104 5 : STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
105 5 : break;
106 : }
107 10 : transport = nvme_get_next_transport(transport);
108 : }
109 : }
110 :
111 9 : return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
112 : }
113 :
114 : int
115 7 : spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
116 : {
117 : struct spdk_nvme_transport_poll_group *tgroup;
118 :
119 15 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
120 14 : if (tgroup->transport == qpair->transport) {
121 6 : return nvme_transport_poll_group_remove(tgroup, qpair);
122 : }
123 : }
124 :
125 1 : return -ENODEV;
126 : }
127 :
/* Connect a qpair through its transport poll group. Thin pass-through kept as
 * a separate symbol so callers inside the driver do not depend on the
 * transport layer directly. */
int
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_connect_qpair(qpair);
}
133 :
/* Disconnect a qpair through its transport poll group. Counterpart to
 * nvme_poll_group_connect_qpair(). */
int
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_disconnect_qpair(qpair);
}
139 :
140 : int64_t
141 2 : spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
142 : uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
143 : {
144 : struct spdk_nvme_transport_poll_group *tgroup;
145 2 : int64_t local_completions = 0, error_reason = 0, num_completions = 0;
146 :
147 2 : if (disconnected_qpair_cb == NULL) {
148 0 : return -EINVAL;
149 : }
150 :
151 2 : if (spdk_unlikely(group->in_process_completions)) {
152 0 : return 0;
153 : }
154 2 : group->in_process_completions = true;
155 :
156 3 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
157 1 : local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
158 : disconnected_qpair_cb);
159 1 : if (local_completions < 0 && error_reason == 0) {
160 0 : error_reason = local_completions;
161 : } else {
162 1 : num_completions += local_completions;
163 : /* Just to be safe */
164 1 : assert(num_completions >= 0);
165 : }
166 : }
167 2 : group->in_process_completions = false;
168 :
169 2 : return error_reason ? error_reason : num_completions;
170 : }
171 :
172 : int
173 0 : spdk_nvme_poll_group_all_connected(struct spdk_nvme_poll_group *group)
174 : {
175 : struct spdk_nvme_transport_poll_group *tgroup;
176 : struct spdk_nvme_qpair *qpair;
177 0 : int rc = 0;
178 :
179 0 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
180 0 : if (!STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
181 : /* Treat disconnected qpairs as highest priority for notification.
182 : * This means we can just return immediately here.
183 : */
184 0 : return -EIO;
185 : }
186 0 : STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
187 0 : if (nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTING) {
188 0 : return -EIO;
189 0 : } else if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
190 0 : rc = -EAGAIN;
191 : /* Break so that we can check the remaining transport groups,
192 : * in case any of them have a disconnected qpair.
193 : */
194 0 : break;
195 : }
196 : }
197 : }
198 :
199 0 : return rc;
200 : }
201 :
/* Return the opaque context pointer supplied at spdk_nvme_poll_group_create(). */
void *
spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
{
	return group->ctx;
}
207 :
208 : int
209 8 : spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
210 : {
211 : struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
212 :
213 9 : STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
214 2 : STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
215 2 : if (nvme_transport_poll_group_destroy(tgroup) != 0) {
216 1 : STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
217 1 : return -EBUSY;
218 : }
219 :
220 : }
221 :
222 7 : free(group);
223 :
224 7 : return 0;
225 : }
226 :
227 : int
228 2 : spdk_nvme_poll_group_get_stats(struct spdk_nvme_poll_group *group,
229 : struct spdk_nvme_poll_group_stat **stats)
230 : {
231 : struct spdk_nvme_transport_poll_group *tgroup;
232 : struct spdk_nvme_poll_group_stat *result;
233 2 : uint32_t transports_count = 0;
234 : /* Not all transports used by this poll group may support statistics reporting */
235 2 : uint32_t reported_stats_count = 0;
236 : int rc;
237 :
238 2 : assert(group);
239 2 : assert(stats);
240 :
241 2 : result = calloc(1, sizeof(*result));
242 2 : if (!result) {
243 0 : SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
244 0 : return -ENOMEM;
245 : }
246 :
247 5 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
248 3 : transports_count++;
249 : }
250 :
251 2 : result->transport_stat = calloc(transports_count, sizeof(*result->transport_stat));
252 2 : if (!result->transport_stat) {
253 0 : SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
254 0 : free(result);
255 0 : return -ENOMEM;
256 : }
257 :
258 5 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
259 3 : rc = nvme_transport_poll_group_get_stats(tgroup, &result->transport_stat[reported_stats_count]);
260 3 : if (rc == 0) {
261 3 : reported_stats_count++;
262 : }
263 : }
264 :
265 2 : if (reported_stats_count == 0) {
266 1 : free(result->transport_stat);
267 1 : free(result);
268 1 : SPDK_DEBUGLOG(nvme, "No transport statistics available\n");
269 1 : return -ENOTSUP;
270 : }
271 :
272 1 : result->num_transports = reported_stats_count;
273 1 : *stats = result;
274 :
275 1 : return 0;
276 : }
277 :
278 : void
279 1 : spdk_nvme_poll_group_free_stats(struct spdk_nvme_poll_group *group,
280 : struct spdk_nvme_poll_group_stat *stat)
281 : {
282 : struct spdk_nvme_transport_poll_group *tgroup;
283 : uint32_t i;
284 1 : uint32_t freed_stats __attribute__((unused)) = 0;
285 :
286 1 : assert(group);
287 1 : assert(stat);
288 :
289 4 : for (i = 0; i < stat->num_transports; i++) {
290 3 : STAILQ_FOREACH(tgroup, &group->tgroups, link) {
291 3 : if (nvme_transport_get_trtype(tgroup->transport) == stat->transport_stat[i]->trtype) {
292 3 : nvme_transport_poll_group_free_stats(tgroup, stat->transport_stat[i]);
293 3 : freed_stats++;
294 3 : break;
295 : }
296 : }
297 : }
298 :
299 1 : assert(freed_stats == stat->num_transports);
300 :
301 1 : free(stat->transport_stat);
302 1 : free(stat);
303 1 : }
|