Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2020 Intel Corporation.
3 : * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4 : * All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/accel_module.h"
10 :
11 : #include "accel_internal.h"
12 :
13 : #include "spdk/dma.h"
14 : #include "spdk/env.h"
15 : #include "spdk/likely.h"
16 : #include "spdk/log.h"
17 : #include "spdk/thread.h"
18 : #include "spdk/json.h"
19 : #include "spdk/crc32.h"
20 : #include "spdk/util.h"
21 : #include "spdk/hexlify.h"
22 : #include "spdk/string.h"
23 :
24 : /* Accelerator Framework: The following provides a top level
25 : * generic API for the accelerator functions defined here. Modules,
26 : * such as the one in /module/accel/ioat, supply the implementation
27 : * with the exception of the pure software implementation contained
28 : * later in this file.
29 : */
30 :
/* Alignment required for dualcast destination buffers */
#define ALIGN_4K 0x1000
/* Per-channel pool sizes default to this count (tasks, sequences, buffers) */
#define MAX_TASKS_PER_CHANNEL 0x800
/* Default iobuf per-thread cache sizes for accel buffers */
#define ACCEL_SMALL_CACHE_SIZE 128
#define ACCEL_LARGE_CACHE_SIZE 16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE ((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK ((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
/* Max number of tasks in a single sequence handed to a driver at once */
#define ACCEL_TASKS_IN_SEQUENCE_LIMIT 8
41 :
/* Pairs a registered module with a cached flag telling whether it can
 * handle memory-domain (non-local) data, so hot paths avoid re-querying. */
struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};
46 :
/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
/* Framework finish callback + argument, saved until all modules finish */
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
/* Once true, opcode->module assignment can no longer be changed */
static bool g_modules_started = false;
/* Memory domain representing accel-owned (virtual) buffers */
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
/* User-requested module name per opcode (set via RPC before start) */
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
/* Driver selected to execute sequences, if any */
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
/* Aggregate stats from destroyed channels; guarded by g_stats_lock */
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;
78 :
/* Human-readable names indexed by enum spdk_accel_opcode; order must
 * match the enum exactly (used by RPC/JSON output). */
static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy"
};
84 :
/* State machine driving execution of an accel sequence.  The AWAIT_* states
 * mark points where processing is suspended until an async event (buffer
 * allocation, data pull/push, task completion) re-enters the machine. */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};
105 :
/* Debug-log names for each sequence state; only referenced from
 * SPDK_DEBUGLOG, hence marked unused for non-debug builds. */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Bounds-checked lookup of a state name for logging */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
131 :
/* An accel-domain buffer: allocated lazily from iobuf when a sequence is
 * executed.  Until then, buf is NULL and the buffer is identified by an
 * offset within ACCEL_BUFFER_BASE. */
struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	/* iobuf entry used when waiting for a real buffer to become available */
	struct spdk_iobuf_entry		iobuf;
	/* callback invoked once a backing buffer has been assigned */
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
	struct accel_io_channel		*ch;
};
142 :
/* Per-thread accel channel: holds one module channel per opcode, an
 * optional driver channel, and free-lists of tasks/sequences/buffers
 * carved out of the *_base allocations at channel creation. */
struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};
157 :
TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* A chain of accel tasks executed back-to-back.  Kept deliberately compact:
 * the static assert below pins the size to one cache line (64 bytes). */
struct spdk_accel_sequence {
	struct accel_io_channel		*ch;
	struct accel_sequence_tasks	tasks;
	/* bounce buffers allocated while executing this sequence */
	SLIST_HEAD(, accel_buffer)	bounce_bufs;
	int				status;
	/* state uses enum accel_sequence_state */
	uint8_t				state;
	/* guards against re-entering process_sequence from a completion */
	bool				in_process_sequence;
	spdk_accel_completion_cb	cb_fn;
	void				*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
173 :
/* Bump a per-channel stats counter by v; single-threaded per channel,
 * so no atomics are needed. */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Bump a per-opcode stats counter for the given task's op_code */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void accel_sequence_task_cb(void *cb_arg, int status);
183 :
184 : static inline void
185 709 : accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
186 : {
187 709 : SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
188 : ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
189 709 : assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
190 709 : seq->state = state;
191 709 : }
192 :
193 : static void
194 9 : accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
195 : {
196 9 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
197 9 : assert(status != 0);
198 9 : seq->status = status;
199 9 : }
200 :
201 : int
202 15 : spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
203 : {
204 15 : if (opcode >= SPDK_ACCEL_OPC_LAST) {
205 : /* invalid opcode */
206 0 : return -EINVAL;
207 : }
208 :
209 15 : if (g_modules_opc[opcode].module) {
210 15 : *module_name = g_modules_opc[opcode].module->name;
211 : } else {
212 0 : return -ENOENT;
213 : }
214 :
215 15 : return 0;
216 : }
217 :
218 : void
219 0 : _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
220 : {
221 : struct spdk_accel_module_if *accel_module;
222 : enum spdk_accel_opcode opcode;
223 0 : int j = 0;
224 :
225 0 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
226 0 : for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
227 0 : if (accel_module->supports_opcode(opcode)) {
228 0 : info->ops[j] = opcode;
229 0 : j++;
230 : }
231 : }
232 0 : info->name = accel_module->name;
233 0 : info->num_ops = j;
234 0 : fn(info);
235 0 : j = 0;
236 : }
237 0 : }
238 :
239 : const char *
240 0 : spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
241 : {
242 0 : if (opcode < SPDK_ACCEL_OPC_LAST) {
243 0 : return g_opcode_strings[opcode];
244 : }
245 :
246 0 : return NULL;
247 : }
248 :
249 : int
250 0 : spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
251 : {
252 : char *copy;
253 :
254 0 : if (g_modules_started == true) {
255 : /* we don't allow re-assignment once things have started */
256 0 : return -EINVAL;
257 : }
258 :
259 0 : if (opcode >= SPDK_ACCEL_OPC_LAST) {
260 : /* invalid opcode */
261 0 : return -EINVAL;
262 : }
263 :
264 0 : copy = strdup(name);
265 0 : if (copy == NULL) {
266 0 : return -ENOMEM;
267 : }
268 :
269 : /* module selection will be validated after the framework starts. */
270 0 : free(g_modules_opc_override[opcode]);
271 0 : g_modules_opc_override[opcode] = copy;
272 :
273 0 : return 0;
274 : }
275 :
276 : inline static struct spdk_accel_task *
277 151 : _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
278 : {
279 : struct spdk_accel_task *accel_task;
280 :
281 151 : accel_task = STAILQ_FIRST(&accel_ch->task_pool);
282 151 : if (spdk_unlikely(accel_task == NULL)) {
283 11 : accel_update_stats(accel_ch, retry.task, 1);
284 11 : return NULL;
285 : }
286 :
287 140 : accel_update_stats(accel_ch, task_outstanding, 1);
288 140 : STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
289 140 : accel_task->link.stqe_next = NULL;
290 :
291 140 : accel_task->cb_fn = cb_fn;
292 140 : accel_task->cb_arg = cb_arg;
293 140 : accel_task->accel_ch = accel_ch;
294 140 : accel_task->s.iovs = NULL;
295 140 : accel_task->d.iovs = NULL;
296 :
297 140 : return accel_task;
298 : }
299 :
300 : static void
301 132 : _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
302 : {
303 132 : STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
304 132 : accel_update_stats(ch, task_outstanding, -1);
305 132 : }
306 :
/* Complete a task: update stats, then either hand it to the owning
 * sequence's state machine or invoke the user callback directly.
 * NOTE(order): the task must be returned to the pool BEFORE cb_fn runs
 * (see comment below), so do not reorder the tail of this function. */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	/* Tasks owned by a sequence are completed through the sequence state
	 * machine, which also recycles the task itself. */
	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	/* Save the callback before recycling the task below */
	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	_put_task(accel_ch, accel_task);

	cb_fn(cb_arg, status);
}
342 :
343 : static inline int
344 92 : accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
345 : {
346 92 : struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
347 92 : struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
348 : int rc;
349 :
350 92 : rc = module->submit_tasks(module_ch, task);
351 92 : if (spdk_unlikely(rc != 0)) {
352 2 : accel_update_task_stats(accel_ch, task, failed, 1);
353 : }
354 :
355 92 : return rc;
356 : }
357 :
/* Sum the byte lengths of an iovec array. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov = iovs;
	struct iovec *end = iovs + iovcnt;
	uint64_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}

	return total;
}
370 :
/* Attach an aux-data entry (iovec storage) to a task, or recycle the task
 * and return -ENOMEM from the *enclosing function* if the pool is empty.
 * The pools are sized identically, so exhaustion here indicates a bug —
 * hence the assert. Only usable inside functions returning int. */
#define ACCEL_TASK_ALLOC_AUX_BUF(task)				\
do {								\
	(task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
	if (spdk_unlikely(!(task)->aux)) {			\
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
		_put_task(task->accel_ch, task);		\
		assert(0);					\
		return -ENOMEM;					\
	}							\
	SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);	\
	(task)->has_aux = true;					\
} while (0)
383 :
384 : /* Accel framework public API for copy function */
385 : int
386 2 : spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
387 : uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
388 : {
389 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
390 : struct spdk_accel_task *accel_task;
391 :
392 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
393 2 : if (spdk_unlikely(accel_task == NULL)) {
394 1 : return -ENOMEM;
395 : }
396 :
397 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
398 :
399 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
400 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
401 1 : accel_task->d.iovs[0].iov_base = dst;
402 1 : accel_task->d.iovs[0].iov_len = nbytes;
403 1 : accel_task->d.iovcnt = 1;
404 1 : accel_task->s.iovs[0].iov_base = src;
405 1 : accel_task->s.iovs[0].iov_len = nbytes;
406 1 : accel_task->s.iovcnt = 1;
407 1 : accel_task->nbytes = nbytes;
408 1 : accel_task->op_code = SPDK_ACCEL_OPC_COPY;
409 1 : accel_task->src_domain = NULL;
410 1 : accel_task->dst_domain = NULL;
411 :
412 1 : return accel_submit_task(accel_ch, accel_task);
413 : }
414 :
415 : /* Accel framework public API for dual cast copy function */
416 : int
417 4 : spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
418 : void *dst2, void *src, uint64_t nbytes,
419 : spdk_accel_completion_cb cb_fn, void *cb_arg)
420 : {
421 4 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
422 : struct spdk_accel_task *accel_task;
423 :
424 4 : if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
425 2 : SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
426 2 : return -EINVAL;
427 : }
428 :
429 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
430 2 : if (spdk_unlikely(accel_task == NULL)) {
431 1 : return -ENOMEM;
432 : }
433 :
434 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
435 :
436 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
437 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
438 1 : accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
439 1 : accel_task->d.iovs[0].iov_base = dst1;
440 1 : accel_task->d.iovs[0].iov_len = nbytes;
441 1 : accel_task->d.iovcnt = 1;
442 1 : accel_task->d2.iovs[0].iov_base = dst2;
443 1 : accel_task->d2.iovs[0].iov_len = nbytes;
444 1 : accel_task->d2.iovcnt = 1;
445 1 : accel_task->s.iovs[0].iov_base = src;
446 1 : accel_task->s.iovs[0].iov_len = nbytes;
447 1 : accel_task->s.iovcnt = 1;
448 1 : accel_task->nbytes = nbytes;
449 1 : accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
450 1 : accel_task->src_domain = NULL;
451 1 : accel_task->dst_domain = NULL;
452 :
453 1 : return accel_submit_task(accel_ch, accel_task);
454 : }
455 :
456 : /* Accel framework public API for compare function */
457 :
458 : int
459 2 : spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
460 : void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
461 : void *cb_arg)
462 : {
463 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
464 : struct spdk_accel_task *accel_task;
465 :
466 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
467 2 : if (spdk_unlikely(accel_task == NULL)) {
468 1 : return -ENOMEM;
469 : }
470 :
471 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
472 :
473 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
474 1 : accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
475 1 : accel_task->s.iovs[0].iov_base = src1;
476 1 : accel_task->s.iovs[0].iov_len = nbytes;
477 1 : accel_task->s.iovcnt = 1;
478 1 : accel_task->s2.iovs[0].iov_base = src2;
479 1 : accel_task->s2.iovs[0].iov_len = nbytes;
480 1 : accel_task->s2.iovcnt = 1;
481 1 : accel_task->nbytes = nbytes;
482 1 : accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
483 1 : accel_task->src_domain = NULL;
484 1 : accel_task->dst_domain = NULL;
485 :
486 1 : return accel_submit_task(accel_ch, accel_task);
487 : }
488 :
489 : /* Accel framework public API for fill function */
490 : int
491 2 : spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
492 : uint8_t fill, uint64_t nbytes,
493 : spdk_accel_completion_cb cb_fn, void *cb_arg)
494 : {
495 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
496 : struct spdk_accel_task *accel_task;
497 :
498 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
499 2 : if (spdk_unlikely(accel_task == NULL)) {
500 1 : return -ENOMEM;
501 : }
502 :
503 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
504 :
505 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
506 1 : accel_task->d.iovs[0].iov_base = dst;
507 1 : accel_task->d.iovs[0].iov_len = nbytes;
508 1 : accel_task->d.iovcnt = 1;
509 1 : accel_task->nbytes = nbytes;
510 1 : memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
511 1 : accel_task->op_code = SPDK_ACCEL_OPC_FILL;
512 1 : accel_task->src_domain = NULL;
513 1 : accel_task->dst_domain = NULL;
514 :
515 1 : return accel_submit_task(accel_ch, accel_task);
516 : }
517 :
518 : /* Accel framework public API for CRC-32C function */
519 : int
520 2 : spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
521 : void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
522 : void *cb_arg)
523 : {
524 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
525 : struct spdk_accel_task *accel_task;
526 :
527 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
528 2 : if (spdk_unlikely(accel_task == NULL)) {
529 1 : return -ENOMEM;
530 : }
531 :
532 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
533 :
534 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
535 1 : accel_task->s.iovs[0].iov_base = src;
536 1 : accel_task->s.iovs[0].iov_len = nbytes;
537 1 : accel_task->s.iovcnt = 1;
538 1 : accel_task->nbytes = nbytes;
539 1 : accel_task->crc_dst = crc_dst;
540 1 : accel_task->seed = seed;
541 1 : accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
542 1 : accel_task->src_domain = NULL;
543 1 : accel_task->dst_domain = NULL;
544 :
545 1 : return accel_submit_task(accel_ch, accel_task);
546 : }
547 :
548 : /* Accel framework public API for chained CRC-32C function */
549 : int
550 1 : spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
551 : struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
552 : spdk_accel_completion_cb cb_fn, void *cb_arg)
553 : {
554 1 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
555 : struct spdk_accel_task *accel_task;
556 :
557 1 : if (iov == NULL) {
558 0 : SPDK_ERRLOG("iov should not be NULL");
559 0 : return -EINVAL;
560 : }
561 :
562 1 : if (!iov_cnt) {
563 0 : SPDK_ERRLOG("iovcnt should not be zero value\n");
564 0 : return -EINVAL;
565 : }
566 :
567 1 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
568 1 : if (spdk_unlikely(accel_task == NULL)) {
569 0 : SPDK_ERRLOG("no memory\n");
570 0 : assert(0);
571 : return -ENOMEM;
572 : }
573 :
574 1 : accel_task->s.iovs = iov;
575 1 : accel_task->s.iovcnt = iov_cnt;
576 1 : accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
577 1 : accel_task->crc_dst = crc_dst;
578 1 : accel_task->seed = seed;
579 1 : accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
580 1 : accel_task->src_domain = NULL;
581 1 : accel_task->dst_domain = NULL;
582 :
583 1 : return accel_submit_task(accel_ch, accel_task);
584 : }
585 :
586 : /* Accel framework public API for copy with CRC-32C function */
587 : int
588 2 : spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
589 : void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
590 : spdk_accel_completion_cb cb_fn, void *cb_arg)
591 : {
592 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
593 : struct spdk_accel_task *accel_task;
594 :
595 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
596 2 : if (spdk_unlikely(accel_task == NULL)) {
597 1 : return -ENOMEM;
598 : }
599 :
600 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
601 :
602 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
603 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
604 1 : accel_task->d.iovs[0].iov_base = dst;
605 1 : accel_task->d.iovs[0].iov_len = nbytes;
606 1 : accel_task->d.iovcnt = 1;
607 1 : accel_task->s.iovs[0].iov_base = src;
608 1 : accel_task->s.iovs[0].iov_len = nbytes;
609 1 : accel_task->s.iovcnt = 1;
610 1 : accel_task->nbytes = nbytes;
611 1 : accel_task->crc_dst = crc_dst;
612 1 : accel_task->seed = seed;
613 1 : accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
614 1 : accel_task->src_domain = NULL;
615 1 : accel_task->dst_domain = NULL;
616 :
617 1 : return accel_submit_task(accel_ch, accel_task);
618 : }
619 :
620 : /* Accel framework public API for chained copy + CRC-32C function */
621 : int
622 0 : spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
623 : struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
624 : uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
625 : {
626 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
627 : struct spdk_accel_task *accel_task;
628 : uint64_t nbytes;
629 :
630 0 : if (src_iovs == NULL) {
631 0 : SPDK_ERRLOG("iov should not be NULL");
632 0 : return -EINVAL;
633 : }
634 :
635 0 : if (!iov_cnt) {
636 0 : SPDK_ERRLOG("iovcnt should not be zero value\n");
637 0 : return -EINVAL;
638 : }
639 :
640 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
641 0 : if (spdk_unlikely(accel_task == NULL)) {
642 0 : SPDK_ERRLOG("no memory\n");
643 0 : assert(0);
644 : return -ENOMEM;
645 : }
646 :
647 0 : nbytes = accel_get_iovlen(src_iovs, iov_cnt);
648 :
649 0 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
650 :
651 0 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
652 0 : accel_task->d.iovs[0].iov_base = dst;
653 0 : accel_task->d.iovs[0].iov_len = nbytes;
654 0 : accel_task->d.iovcnt = 1;
655 0 : accel_task->s.iovs = src_iovs;
656 0 : accel_task->s.iovcnt = iov_cnt;
657 0 : accel_task->nbytes = nbytes;
658 0 : accel_task->crc_dst = crc_dst;
659 0 : accel_task->seed = seed;
660 0 : accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
661 0 : accel_task->src_domain = NULL;
662 0 : accel_task->dst_domain = NULL;
663 :
664 0 : return accel_submit_task(accel_ch, accel_task);
665 : }
666 :
667 : int
668 0 : spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
669 : struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
670 : spdk_accel_completion_cb cb_fn, void *cb_arg)
671 : {
672 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
673 : struct spdk_accel_task *accel_task;
674 :
675 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
676 0 : if (spdk_unlikely(accel_task == NULL)) {
677 0 : return -ENOMEM;
678 : }
679 :
680 0 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
681 :
682 0 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
683 0 : accel_task->d.iovs[0].iov_base = dst;
684 0 : accel_task->d.iovs[0].iov_len = nbytes;
685 0 : accel_task->d.iovcnt = 1;
686 0 : accel_task->output_size = output_size;
687 0 : accel_task->s.iovs = src_iovs;
688 0 : accel_task->s.iovcnt = src_iovcnt;
689 0 : accel_task->nbytes = nbytes;
690 0 : accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
691 0 : accel_task->src_domain = NULL;
692 0 : accel_task->dst_domain = NULL;
693 :
694 0 : return accel_submit_task(accel_ch, accel_task);
695 : }
696 :
697 : int
698 0 : spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
699 : size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
700 : uint32_t *output_size, spdk_accel_completion_cb cb_fn,
701 : void *cb_arg)
702 : {
703 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
704 : struct spdk_accel_task *accel_task;
705 :
706 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
707 0 : if (spdk_unlikely(accel_task == NULL)) {
708 0 : return -ENOMEM;
709 : }
710 :
711 0 : accel_task->output_size = output_size;
712 0 : accel_task->s.iovs = src_iovs;
713 0 : accel_task->s.iovcnt = src_iovcnt;
714 0 : accel_task->d.iovs = dst_iovs;
715 0 : accel_task->d.iovcnt = dst_iovcnt;
716 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
717 0 : accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
718 0 : accel_task->src_domain = NULL;
719 0 : accel_task->dst_domain = NULL;
720 :
721 0 : return accel_submit_task(accel_ch, accel_task);
722 : }
723 :
724 : int
725 0 : spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
726 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
727 : struct iovec *src_iovs, uint32_t src_iovcnt,
728 : uint64_t iv, uint32_t block_size,
729 : spdk_accel_completion_cb cb_fn, void *cb_arg)
730 : {
731 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
732 : struct spdk_accel_task *accel_task;
733 :
734 0 : if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
735 0 : return -EINVAL;
736 : }
737 :
738 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
739 0 : if (spdk_unlikely(accel_task == NULL)) {
740 0 : return -ENOMEM;
741 : }
742 :
743 0 : accel_task->crypto_key = key;
744 0 : accel_task->s.iovs = src_iovs;
745 0 : accel_task->s.iovcnt = src_iovcnt;
746 0 : accel_task->d.iovs = dst_iovs;
747 0 : accel_task->d.iovcnt = dst_iovcnt;
748 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
749 0 : accel_task->iv = iv;
750 0 : accel_task->block_size = block_size;
751 0 : accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
752 0 : accel_task->src_domain = NULL;
753 0 : accel_task->dst_domain = NULL;
754 :
755 0 : return accel_submit_task(accel_ch, accel_task);
756 : }
757 :
758 : int
759 0 : spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
760 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
761 : struct iovec *src_iovs, uint32_t src_iovcnt,
762 : uint64_t iv, uint32_t block_size,
763 : spdk_accel_completion_cb cb_fn, void *cb_arg)
764 : {
765 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
766 : struct spdk_accel_task *accel_task;
767 :
768 0 : if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
769 0 : return -EINVAL;
770 : }
771 :
772 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
773 0 : if (spdk_unlikely(accel_task == NULL)) {
774 0 : return -ENOMEM;
775 : }
776 :
777 0 : accel_task->crypto_key = key;
778 0 : accel_task->s.iovs = src_iovs;
779 0 : accel_task->s.iovcnt = src_iovcnt;
780 0 : accel_task->d.iovs = dst_iovs;
781 0 : accel_task->d.iovcnt = dst_iovcnt;
782 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
783 0 : accel_task->iv = iv;
784 0 : accel_task->block_size = block_size;
785 0 : accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
786 0 : accel_task->src_domain = NULL;
787 0 : accel_task->dst_domain = NULL;
788 :
789 0 : return accel_submit_task(accel_ch, accel_task);
790 : }
791 :
792 : int
793 2 : spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
794 : uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
795 : {
796 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
797 : struct spdk_accel_task *accel_task;
798 :
799 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
800 2 : if (spdk_unlikely(accel_task == NULL)) {
801 1 : return -ENOMEM;
802 : }
803 :
804 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
805 :
806 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
807 1 : accel_task->nsrcs.srcs = sources;
808 1 : accel_task->nsrcs.cnt = nsrcs;
809 1 : accel_task->d.iovs[0].iov_base = dst;
810 1 : accel_task->d.iovs[0].iov_len = nbytes;
811 1 : accel_task->d.iovcnt = 1;
812 1 : accel_task->nbytes = nbytes;
813 1 : accel_task->op_code = SPDK_ACCEL_OPC_XOR;
814 1 : accel_task->src_domain = NULL;
815 1 : accel_task->dst_domain = NULL;
816 :
817 1 : return accel_submit_task(accel_ch, accel_task);
818 : }
819 :
820 : int
821 0 : spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
822 : struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
823 : const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
824 : spdk_accel_completion_cb cb_fn, void *cb_arg)
825 : {
826 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
827 : struct spdk_accel_task *accel_task;
828 :
829 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
830 0 : if (accel_task == NULL) {
831 0 : return -ENOMEM;
832 : }
833 :
834 0 : accel_task->s.iovs = iovs;
835 0 : accel_task->s.iovcnt = iovcnt;
836 0 : accel_task->dif.ctx = ctx;
837 0 : accel_task->dif.err = err;
838 0 : accel_task->dif.num_blocks = num_blocks;
839 0 : accel_task->nbytes = num_blocks * ctx->block_size;
840 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
841 0 : accel_task->src_domain = NULL;
842 0 : accel_task->dst_domain = NULL;
843 :
844 0 : return accel_submit_task(accel_ch, accel_task);
845 : }
846 :
847 : int
848 0 : spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
849 : struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
850 : const struct spdk_dif_ctx *ctx,
851 : spdk_accel_completion_cb cb_fn, void *cb_arg)
852 : {
853 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
854 : struct spdk_accel_task *accel_task;
855 :
856 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
857 0 : if (accel_task == NULL) {
858 0 : return -ENOMEM;
859 : }
860 :
861 0 : accel_task->s.iovs = iovs;
862 0 : accel_task->s.iovcnt = iovcnt;
863 0 : accel_task->dif.ctx = ctx;
864 0 : accel_task->dif.num_blocks = num_blocks;
865 0 : accel_task->nbytes = num_blocks * ctx->block_size;
866 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
867 0 : accel_task->src_domain = NULL;
868 0 : accel_task->dst_domain = NULL;
869 :
870 0 : return accel_submit_task(accel_ch, accel_task);
871 : }
872 :
873 : int
874 0 : spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
875 : size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
876 : uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
877 : spdk_accel_completion_cb cb_fn, void *cb_arg)
878 : {
879 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
880 : struct spdk_accel_task *accel_task;
881 :
882 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
883 0 : if (accel_task == NULL) {
884 0 : return -ENOMEM;
885 : }
886 :
887 0 : accel_task->s.iovs = src_iovs;
888 0 : accel_task->s.iovcnt = src_iovcnt;
889 0 : accel_task->d.iovs = dst_iovs;
890 0 : accel_task->d.iovcnt = dst_iovcnt;
891 0 : accel_task->dif.ctx = ctx;
892 0 : accel_task->dif.num_blocks = num_blocks;
893 0 : accel_task->nbytes = num_blocks * ctx->block_size;
894 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
895 0 : accel_task->src_domain = NULL;
896 0 : accel_task->dst_domain = NULL;
897 :
898 0 : return accel_submit_task(accel_ch, accel_task);
899 : }
900 :
901 : int
902 0 : spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
903 : struct iovec *dst_iovs, size_t dst_iovcnt,
904 : struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
905 : const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
906 : spdk_accel_completion_cb cb_fn, void *cb_arg)
907 : {
908 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
909 : struct spdk_accel_task *accel_task;
910 :
911 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
912 0 : if (accel_task == NULL) {
913 0 : return -ENOMEM;
914 : }
915 :
916 0 : accel_task->s.iovs = src_iovs;
917 0 : accel_task->s.iovcnt = src_iovcnt;
918 0 : accel_task->d.iovs = dst_iovs;
919 0 : accel_task->d.iovcnt = dst_iovcnt;
920 0 : accel_task->dif.ctx = ctx;
921 0 : accel_task->dif.err = err;
922 0 : accel_task->dif.num_blocks = num_blocks;
923 0 : accel_task->nbytes = num_blocks * ctx->block_size;
924 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
925 0 : accel_task->src_domain = NULL;
926 0 : accel_task->dst_domain = NULL;
927 :
928 0 : return accel_submit_task(accel_ch, accel_task);
929 : }
930 :
931 : static inline struct accel_buffer *
932 33 : accel_get_buf(struct accel_io_channel *ch, uint64_t len)
933 : {
934 : struct accel_buffer *buf;
935 :
936 33 : buf = SLIST_FIRST(&ch->buf_pool);
937 33 : if (spdk_unlikely(buf == NULL)) {
938 0 : accel_update_stats(ch, retry.bufdesc, 1);
939 0 : return NULL;
940 : }
941 :
942 33 : SLIST_REMOVE_HEAD(&ch->buf_pool, link);
943 33 : buf->len = len;
944 33 : buf->buf = NULL;
945 33 : buf->seq = NULL;
946 33 : buf->cb_fn = NULL;
947 :
948 33 : return buf;
949 : }
950 :
951 : static inline void
952 33 : accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
953 : {
954 33 : if (buf->buf != NULL) {
955 29 : spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
956 : }
957 :
958 33 : SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
959 33 : }
960 :
/* Allocate a sequence object from the channel's pool and reset its state.
 * Returns NULL when the pool is empty or when too few task objects remain
 * to safely execute a sequence (deadlock avoidance, see comment below). */
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	assert(g_opts.task_count >= ch->stats.task_outstanding);

	/* Sequence cannot be allocated if number of available task objects cannot satisfy required limit.
	 * This is to prevent potential dead lock when few requests are pending task resource and none can
	 * advance the processing. This solution should work only if there is single async operation after
	 * sequence obj obtained, so assume that is possible to happen with io buffer allocation now, if
	 * there are more async operations then solution should be improved. */
	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding < ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
		return NULL;
	}

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		/* Pool exhausted; record the retry so the caller can back off. */
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	accel_update_stats(ch, sequence_outstanding, 1);
	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	/* Reset per-execution state left over from the previous user. */
	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}
996 :
997 : static inline void
998 57 : accel_sequence_put(struct spdk_accel_sequence *seq)
999 : {
1000 57 : struct accel_io_channel *ch = seq->ch;
1001 : struct accel_buffer *buf;
1002 :
1003 76 : while (!SLIST_EMPTY(&seq->bounce_bufs)) {
1004 19 : buf = SLIST_FIRST(&seq->bounce_bufs);
1005 19 : SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
1006 19 : accel_put_buf(seq->ch, buf);
1007 : }
1008 :
1009 57 : assert(TAILQ_EMPTY(&seq->tasks));
1010 57 : seq->ch = NULL;
1011 :
1012 57 : SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
1013 57 : accel_update_stats(ch, sequence_outstanding, -1);
1014 57 : }
1015 :
1016 : static void accel_sequence_task_cb(void *cb_arg, int status);
1017 :
1018 : static inline struct spdk_accel_task *
1019 134 : accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
1020 : spdk_accel_step_cb cb_fn, void *cb_arg)
1021 : {
1022 : struct spdk_accel_task *task;
1023 :
1024 134 : task = _get_task(ch, NULL, NULL);
1025 134 : if (spdk_unlikely(task == NULL)) {
1026 3 : return task;
1027 : }
1028 :
1029 131 : task->step_cb_fn = cb_fn;
1030 131 : task->cb_arg = cb_arg;
1031 131 : task->seq = seq;
1032 :
1033 131 : return task;
1034 : }
1035 :
1036 : int
1037 35 : spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1038 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1039 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1040 : struct iovec *src_iovs, uint32_t src_iovcnt,
1041 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1042 : spdk_accel_step_cb cb_fn, void *cb_arg)
1043 : {
1044 35 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1045 : struct spdk_accel_task *task;
1046 35 : struct spdk_accel_sequence *seq = *pseq;
1047 :
1048 35 : if (seq == NULL) {
1049 12 : seq = accel_sequence_get(accel_ch);
1050 12 : if (spdk_unlikely(seq == NULL)) {
1051 1 : return -ENOMEM;
1052 : }
1053 : }
1054 :
1055 34 : assert(seq->ch == accel_ch);
1056 34 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1057 34 : if (spdk_unlikely(task == NULL)) {
1058 1 : if (*pseq == NULL) {
1059 1 : accel_sequence_put(seq);
1060 : }
1061 :
1062 1 : return -ENOMEM;
1063 : }
1064 :
1065 33 : task->dst_domain = dst_domain;
1066 33 : task->dst_domain_ctx = dst_domain_ctx;
1067 33 : task->d.iovs = dst_iovs;
1068 33 : task->d.iovcnt = dst_iovcnt;
1069 33 : task->src_domain = src_domain;
1070 33 : task->src_domain_ctx = src_domain_ctx;
1071 33 : task->s.iovs = src_iovs;
1072 33 : task->s.iovcnt = src_iovcnt;
1073 33 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1074 33 : task->op_code = SPDK_ACCEL_OPC_COPY;
1075 :
1076 33 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1077 33 : *pseq = seq;
1078 :
1079 33 : return 0;
1080 : }
1081 :
1082 : int
1083 39 : spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1084 : void *buf, uint64_t len,
1085 : struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1086 : spdk_accel_step_cb cb_fn, void *cb_arg)
1087 : {
1088 39 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1089 : struct spdk_accel_task *task;
1090 39 : struct spdk_accel_sequence *seq = *pseq;
1091 :
1092 39 : if (seq == NULL) {
1093 23 : seq = accel_sequence_get(accel_ch);
1094 23 : if (spdk_unlikely(seq == NULL)) {
1095 1 : return -ENOMEM;
1096 : }
1097 : }
1098 :
1099 38 : assert(seq->ch == accel_ch);
1100 38 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1101 38 : if (spdk_unlikely(task == NULL)) {
1102 1 : if (*pseq == NULL) {
1103 1 : accel_sequence_put(seq);
1104 : }
1105 :
1106 1 : return -ENOMEM;
1107 : }
1108 :
1109 37 : memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1110 :
1111 37 : task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1112 37 : if (spdk_unlikely(!task->aux)) {
1113 0 : SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1114 0 : if (*pseq == NULL) {
1115 0 : accel_sequence_put((seq));
1116 : }
1117 :
1118 0 : task->seq = NULL;
1119 0 : _put_task(task->accel_ch, task);
1120 0 : assert(0);
1121 : return -ENOMEM;
1122 : }
1123 37 : SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1124 37 : task->has_aux = true;
1125 :
1126 37 : task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1127 37 : task->d.iovs[0].iov_base = buf;
1128 37 : task->d.iovs[0].iov_len = len;
1129 37 : task->d.iovcnt = 1;
1130 37 : task->nbytes = len;
1131 37 : task->src_domain = NULL;
1132 37 : task->dst_domain = domain;
1133 37 : task->dst_domain_ctx = domain_ctx;
1134 37 : task->op_code = SPDK_ACCEL_OPC_FILL;
1135 :
1136 37 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1137 37 : *pseq = seq;
1138 :
1139 37 : return 0;
1140 : }
1141 :
1142 : int
1143 40 : spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1144 : struct iovec *dst_iovs, size_t dst_iovcnt,
1145 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1146 : struct iovec *src_iovs, size_t src_iovcnt,
1147 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1148 : spdk_accel_step_cb cb_fn, void *cb_arg)
1149 : {
1150 40 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1151 : struct spdk_accel_task *task;
1152 40 : struct spdk_accel_sequence *seq = *pseq;
1153 :
1154 40 : if (seq == NULL) {
1155 17 : seq = accel_sequence_get(accel_ch);
1156 17 : if (spdk_unlikely(seq == NULL)) {
1157 1 : return -ENOMEM;
1158 : }
1159 : }
1160 :
1161 39 : assert(seq->ch == accel_ch);
1162 39 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1163 39 : if (spdk_unlikely(task == NULL)) {
1164 1 : if (*pseq == NULL) {
1165 1 : accel_sequence_put(seq);
1166 : }
1167 :
1168 1 : return -ENOMEM;
1169 : }
1170 :
1171 : /* TODO: support output_size for chaining */
1172 38 : task->output_size = NULL;
1173 38 : task->dst_domain = dst_domain;
1174 38 : task->dst_domain_ctx = dst_domain_ctx;
1175 38 : task->d.iovs = dst_iovs;
1176 38 : task->d.iovcnt = dst_iovcnt;
1177 38 : task->src_domain = src_domain;
1178 38 : task->src_domain_ctx = src_domain_ctx;
1179 38 : task->s.iovs = src_iovs;
1180 38 : task->s.iovcnt = src_iovcnt;
1181 38 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1182 38 : task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1183 :
1184 38 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1185 38 : *pseq = seq;
1186 :
1187 38 : return 0;
1188 : }
1189 :
1190 : int
1191 8 : spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1192 : struct spdk_accel_crypto_key *key,
1193 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1194 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1195 : struct iovec *src_iovs, uint32_t src_iovcnt,
1196 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1197 : uint64_t iv, uint32_t block_size,
1198 : spdk_accel_step_cb cb_fn, void *cb_arg)
1199 : {
1200 8 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1201 : struct spdk_accel_task *task;
1202 8 : struct spdk_accel_sequence *seq = *pseq;
1203 :
1204 8 : assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1205 :
1206 8 : if (seq == NULL) {
1207 6 : seq = accel_sequence_get(accel_ch);
1208 6 : if (spdk_unlikely(seq == NULL)) {
1209 0 : return -ENOMEM;
1210 : }
1211 : }
1212 :
1213 8 : assert(seq->ch == accel_ch);
1214 8 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1215 8 : if (spdk_unlikely(task == NULL)) {
1216 0 : if (*pseq == NULL) {
1217 0 : accel_sequence_put(seq);
1218 : }
1219 :
1220 0 : return -ENOMEM;
1221 : }
1222 :
1223 8 : task->crypto_key = key;
1224 8 : task->src_domain = src_domain;
1225 8 : task->src_domain_ctx = src_domain_ctx;
1226 8 : task->s.iovs = src_iovs;
1227 8 : task->s.iovcnt = src_iovcnt;
1228 8 : task->dst_domain = dst_domain;
1229 8 : task->dst_domain_ctx = dst_domain_ctx;
1230 8 : task->d.iovs = dst_iovs;
1231 8 : task->d.iovcnt = dst_iovcnt;
1232 8 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1233 8 : task->iv = iv;
1234 8 : task->block_size = block_size;
1235 8 : task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1236 :
1237 8 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1238 8 : *pseq = seq;
1239 :
1240 8 : return 0;
1241 : }
1242 :
1243 : int
1244 9 : spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1245 : struct spdk_accel_crypto_key *key,
1246 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1247 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1248 : struct iovec *src_iovs, uint32_t src_iovcnt,
1249 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1250 : uint64_t iv, uint32_t block_size,
1251 : spdk_accel_step_cb cb_fn, void *cb_arg)
1252 : {
1253 9 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1254 : struct spdk_accel_task *task;
1255 9 : struct spdk_accel_sequence *seq = *pseq;
1256 :
1257 9 : assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1258 :
1259 9 : if (seq == NULL) {
1260 0 : seq = accel_sequence_get(accel_ch);
1261 0 : if (spdk_unlikely(seq == NULL)) {
1262 0 : return -ENOMEM;
1263 : }
1264 : }
1265 :
1266 9 : assert(seq->ch == accel_ch);
1267 9 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1268 9 : if (spdk_unlikely(task == NULL)) {
1269 0 : if (*pseq == NULL) {
1270 0 : accel_sequence_put(seq);
1271 : }
1272 :
1273 0 : return -ENOMEM;
1274 : }
1275 :
1276 9 : task->crypto_key = key;
1277 9 : task->src_domain = src_domain;
1278 9 : task->src_domain_ctx = src_domain_ctx;
1279 9 : task->s.iovs = src_iovs;
1280 9 : task->s.iovcnt = src_iovcnt;
1281 9 : task->dst_domain = dst_domain;
1282 9 : task->dst_domain_ctx = dst_domain_ctx;
1283 9 : task->d.iovs = dst_iovs;
1284 9 : task->d.iovcnt = dst_iovcnt;
1285 9 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1286 9 : task->iv = iv;
1287 9 : task->block_size = block_size;
1288 9 : task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1289 :
1290 9 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1291 9 : *pseq = seq;
1292 :
1293 9 : return 0;
1294 : }
1295 :
1296 : int
1297 6 : spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1298 : uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1299 : struct spdk_memory_domain *domain, void *domain_ctx,
1300 : uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1301 : {
1302 6 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1303 : struct spdk_accel_task *task;
1304 6 : struct spdk_accel_sequence *seq = *pseq;
1305 :
1306 6 : if (seq == NULL) {
1307 2 : seq = accel_sequence_get(accel_ch);
1308 2 : if (spdk_unlikely(seq == NULL)) {
1309 0 : return -ENOMEM;
1310 : }
1311 : }
1312 :
1313 6 : assert(seq->ch == accel_ch);
1314 6 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1315 6 : if (spdk_unlikely(task == NULL)) {
1316 0 : if (*pseq == NULL) {
1317 0 : accel_sequence_put(seq);
1318 : }
1319 :
1320 0 : return -ENOMEM;
1321 : }
1322 :
1323 6 : task->s.iovs = iovs;
1324 6 : task->s.iovcnt = iovcnt;
1325 6 : task->src_domain = domain;
1326 6 : task->src_domain_ctx = domain_ctx;
1327 6 : task->nbytes = accel_get_iovlen(iovs, iovcnt);
1328 6 : task->crc_dst = dst;
1329 6 : task->seed = seed;
1330 6 : task->op_code = SPDK_ACCEL_OPC_CRC32C;
1331 6 : task->dst_domain = NULL;
1332 :
1333 6 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1334 6 : *pseq = seq;
1335 :
1336 6 : return 0;
1337 : }
1338 :
1339 : int
1340 14 : spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1341 : struct spdk_memory_domain **domain, void **domain_ctx)
1342 : {
1343 14 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1344 : struct accel_buffer *accel_buf;
1345 :
1346 14 : accel_buf = accel_get_buf(accel_ch, len);
1347 14 : if (spdk_unlikely(accel_buf == NULL)) {
1348 0 : return -ENOMEM;
1349 : }
1350 :
1351 14 : accel_buf->ch = accel_ch;
1352 :
1353 : /* We always return the same pointer and identify the buffers through domain_ctx */
1354 14 : *buf = ACCEL_BUFFER_BASE;
1355 14 : *domain_ctx = accel_buf;
1356 14 : *domain = g_accel_domain;
1357 :
1358 14 : return 0;
1359 : }
1360 :
1361 : void
1362 14 : spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1363 : struct spdk_memory_domain *domain, void *domain_ctx)
1364 : {
1365 14 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1366 14 : struct accel_buffer *accel_buf = domain_ctx;
1367 :
1368 14 : assert(domain == g_accel_domain);
1369 14 : assert(buf == ACCEL_BUFFER_BASE);
1370 :
1371 14 : accel_put_buf(accel_ch, accel_buf);
1372 14 : }
1373 :
/* Complete a single task within a sequence: unlink it, recycle its aux data
 * and task object, then invoke its per-step callback. */
static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	/* Stash the callback before recycling the task: once it's back in the pool,
	 * the callback may re-acquire and overwrite it. */
	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}

	_put_task(ch, task);

	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}
1397 :
1398 : static void
1399 54 : accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1400 : {
1401 : struct spdk_accel_task *task;
1402 :
1403 72 : while (!TAILQ_EMPTY(&seq->tasks)) {
1404 18 : task = TAILQ_FIRST(&seq->tasks);
1405 18 : accel_sequence_complete_task(seq, task);
1406 : }
1407 54 : }
1408 :
/* Finish a sequence: update stats, complete all remaining tasks, recycle the
 * sequence object, and finally notify the sequence owner. */
static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	/* Copy the callback and status out first: the sequence object is returned to
	 * the pool before the callback runs, so it must not be touched afterwards. */
	spdk_accel_completion_cb cb_fn = seq->cb_fn;
	void *cb_arg = seq->cb_arg;
	int status = seq->status;

	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);

	/* Then notify the user that finished the sequence */
	cb_fn(cb_arg, status);
}
1430 :
1431 : static void
1432 28 : accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1433 : {
1434 : uintptr_t offset;
1435 :
1436 28 : offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1437 28 : assert(offset < accel_buf->len);
1438 :
1439 28 : diov->iov_base = (char *)accel_buf->buf + offset;
1440 28 : diov->iov_len = siov->iov_len;
1441 28 : }
1442 :
/* After the real data buffer behind an accel-domain virtual buffer has been
 * allocated, rewrite every task in the sequence that referenced it so that its
 * iovecs point at the real memory and its domain is cleared. */
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			/* The rewritten iovec lives in aux data; grab an aux entry if the task
			 * doesn't have one yet. */
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			/* Virtual accel buffers are always described by a single iovec. */
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}
1483 :
1484 : static void accel_process_sequence(struct spdk_accel_sequence *seq);
1485 :
1486 : static void
1487 3 : accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1488 : {
1489 : struct accel_buffer *accel_buf;
1490 :
1491 3 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1492 :
1493 3 : assert(accel_buf->seq != NULL);
1494 3 : assert(accel_buf->buf == NULL);
1495 3 : accel_buf->buf = buf;
1496 :
1497 3 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1498 3 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1499 3 : accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1500 3 : accel_process_sequence(accel_buf->seq);
1501 3 : }
1502 :
1503 : static bool
1504 29 : accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1505 : spdk_iobuf_get_cb cb_fn)
1506 : {
1507 29 : struct accel_io_channel *ch = seq->ch;
1508 :
1509 29 : assert(buf->seq == NULL);
1510 :
1511 29 : buf->seq = seq;
1512 :
1513 : /* Buffer might be already allocated by memory domain translation. */
1514 29 : if (buf->buf) {
1515 0 : return true;
1516 : }
1517 :
1518 29 : buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1519 29 : if (spdk_unlikely(buf->buf == NULL)) {
1520 5 : accel_update_stats(ch, retry.iobuf, 1);
1521 5 : return false;
1522 : }
1523 :
1524 24 : return true;
1525 : }
1526 :
/* Make sure any accel-domain virtual buffers referenced by the task have real
 * memory behind them. Returns false when an allocation had to be deferred;
 * the sequence resumes from accel_iobuf_get_virtbuf_cb in that case. */
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}
1552 :
1553 : static void
1554 0 : accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1555 : {
1556 : struct accel_buffer *accel_buf;
1557 :
1558 0 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1559 :
1560 0 : assert(accel_buf->seq != NULL);
1561 0 : assert(accel_buf->buf == NULL);
1562 0 : accel_buf->buf = buf;
1563 :
1564 0 : accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1565 0 : accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1566 0 : }
1567 :
1568 : bool
1569 0 : spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1570 : struct spdk_memory_domain *domain, void *domain_ctx,
1571 : spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1572 : {
1573 0 : struct accel_buffer *accel_buf = domain_ctx;
1574 :
1575 0 : assert(domain == g_accel_domain);
1576 0 : accel_buf->cb_fn = cb_fn;
1577 0 : accel_buf->cb_ctx = cb_ctx;
1578 :
1579 0 : if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1580 0 : return false;
1581 : }
1582 :
1583 0 : accel_sequence_set_virtbuf(seq, accel_buf);
1584 :
1585 0 : return true;
1586 : }
1587 :
1588 : struct spdk_accel_task *
1589 24 : spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1590 : {
1591 24 : return TAILQ_FIRST(&seq->tasks);
1592 : }
1593 :
1594 : struct spdk_accel_task *
1595 0 : spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1596 : {
1597 0 : return TAILQ_NEXT(task, seq_link);
1598 : }
1599 :
/* Redirect a task's src or dst iovecs to a local bounce buffer, saving the
 * original (domain-resident) description so the data can be pulled/pushed and
 * the task restored later. */
static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	/* Save the caller's original description first. */
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	/* Then point the task at the single local bounce iovec. */
	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}
1616 :
1617 : static void
1618 1 : accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1619 : {
1620 : struct spdk_accel_task *task;
1621 : struct accel_buffer *accel_buf;
1622 :
1623 1 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1624 1 : assert(accel_buf->buf == NULL);
1625 1 : accel_buf->buf = buf;
1626 :
1627 1 : task = TAILQ_FIRST(&accel_buf->seq->tasks);
1628 1 : assert(task != NULL);
1629 :
1630 1 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1631 1 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1632 1 : assert(task->aux);
1633 1 : assert(task->has_aux);
1634 1 : accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1635 : &task->src_domain_ctx, accel_buf);
1636 1 : accel_process_sequence(accel_buf->seq);
1637 1 : }
1638 :
1639 : static void
1640 1 : accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1641 : {
1642 : struct spdk_accel_task *task;
1643 : struct accel_buffer *accel_buf;
1644 :
1645 1 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1646 1 : assert(accel_buf->buf == NULL);
1647 1 : accel_buf->buf = buf;
1648 :
1649 1 : task = TAILQ_FIRST(&accel_buf->seq->tasks);
1650 1 : assert(task != NULL);
1651 :
1652 1 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1653 1 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1654 1 : assert(task->aux);
1655 1 : assert(task->has_aux);
1656 1 : accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1657 : &task->dst_domain_ctx, accel_buf);
1658 1 : accel_process_sequence(accel_buf->seq);
1659 1 : }
1660 :
/* For a module that doesn't support memory domains, replace any domain-resident
 * src/dst iovecs of the task with local bounce buffers. Returns 0 on success,
 * -EAGAIN when a bounce allocation was deferred (the sequence resumes from the
 * iobuf callback), or -ENOMEM when no buffer descriptor was available. */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		/* Bounce state lives in aux data; grab an aux entry if needed. */
		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it's freed at completion. */
		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1727 :
1728 : static void
1729 8 : accel_task_pull_data_cb(void *ctx, int status)
1730 : {
1731 8 : struct spdk_accel_sequence *seq = ctx;
1732 :
1733 8 : assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1734 8 : if (spdk_likely(status == 0)) {
1735 7 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1736 : } else {
1737 1 : accel_sequence_set_fail(seq, status);
1738 : }
1739 :
1740 8 : accel_process_sequence(seq);
1741 8 : }
1742 :
1743 : static void
1744 9 : accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1745 : {
1746 : int rc;
1747 :
1748 9 : assert(task->has_aux);
1749 9 : assert(task->aux);
1750 9 : assert(task->aux->bounce.s.orig_iovs != NULL);
1751 9 : assert(task->aux->bounce.s.orig_domain != NULL);
1752 9 : assert(task->aux->bounce.s.orig_domain != g_accel_domain);
1753 9 : assert(!g_modules_opc[task->op_code].supports_memory_domains);
1754 :
1755 9 : rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
1756 9 : task->aux->bounce.s.orig_domain_ctx,
1757 9 : task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
1758 : task->s.iovs, task->s.iovcnt,
1759 : accel_task_pull_data_cb, seq);
1760 9 : if (spdk_unlikely(rc != 0)) {
1761 1 : SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1762 : spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1763 1 : accel_sequence_set_fail(seq, rc);
1764 : }
1765 9 : }
1766 :
1767 : static void
1768 7 : accel_task_push_data_cb(void *ctx, int status)
1769 : {
1770 7 : struct spdk_accel_sequence *seq = ctx;
1771 :
1772 7 : assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1773 7 : if (spdk_likely(status == 0)) {
1774 6 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1775 : } else {
1776 1 : accel_sequence_set_fail(seq, status);
1777 : }
1778 :
1779 7 : accel_process_sequence(seq);
1780 7 : }
1781 :
1782 : static void
1783 8 : accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1784 : {
1785 : int rc;
1786 :
1787 8 : assert(task->has_aux);
1788 8 : assert(task->aux);
1789 8 : assert(task->aux->bounce.d.orig_iovs != NULL);
1790 8 : assert(task->aux->bounce.d.orig_domain != NULL);
1791 8 : assert(task->aux->bounce.d.orig_domain != g_accel_domain);
1792 8 : assert(!g_modules_opc[task->op_code].supports_memory_domains);
1793 :
1794 8 : rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
1795 8 : task->aux->bounce.d.orig_domain_ctx,
1796 8 : task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
1797 : task->d.iovs, task->d.iovcnt,
1798 : accel_task_push_data_cb, seq);
1799 8 : if (spdk_unlikely(rc != 0)) {
1800 1 : SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1801 : spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1802 1 : accel_sequence_set_fail(seq, rc);
1803 : }
1804 8 : }
1805 :
/* Main state machine driving a sequence of accel tasks.  Repeatedly
 * transitions seq->state until the sequence completes, fails, or parks in
 * one of the AWAIT_* states, where it waits for an asynchronous event
 * (buffer allocation, data pull/push, task or driver completion) whose
 * callback will re-enter this function. */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			/* If a platform driver is registered, it gets first shot at the
			 * remaining tasks. */
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* A source bounce buffer was assigned: the caller's data must be
			 * pulled into it before the task can execute. */
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* If a destination bounce buffer was used, push the results back to
			 * the caller's buffers before moving on. */
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			/* Parked on an asynchronous event; its completion callback will
			 * change the state and re-enter accel_process_sequence(). */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
		/* Keep iterating as long as a state transition happened; an unchanged
		 * state means we are waiting for an async event. */
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1948 :
/* Completion callback for a single task belonging to a sequence.  The
 * handling depends on whether the task was executed directly by a module
 * (AWAIT_TASK) or through the registered driver (DRIVER_AWAIT_TASKS). */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
1985 :
1986 : void
1987 12 : spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1988 : {
1989 12 : assert(g_accel_driver != NULL);
1990 12 : assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1991 :
1992 12 : if (spdk_likely(seq->status == 0)) {
1993 11 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1994 : } else {
1995 1 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1996 : }
1997 :
1998 12 : accel_process_sequence(seq);
1999 12 : }
2000 :
/* Check whether two iovec arrays describe exactly the same buffers.  Only
 * an element-wise identity check is performed; equivalent-but-differently-
 * split descriptions are treated as different. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	uint32_t i;

	if (iovacnt != iovbcnt) {
		return false;
	}

	for (i = 0; i < iovacnt; i++) {
		if (iova[i].iov_base != iovb[i].iov_base ||
		    iova[i].iov_len != iovb[i].iov_len) {
			return false;
		}
	}

	return true;
}
2011 :
/* Try to rewire 'task' so that it writes directly into the destination of
 * 'next' (a subsequent copy task), allowing the copy to be elided.  Returns
 * true only if the copy reads exactly what this task produces (same memory
 * domain and byte-identical iovec arrays). */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* Redirect this task's output to the copy's destination. */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		/* Recursively redirect the producer first; only then can the crc32
		 * safely read from the copy's destination instead. */
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
2062 :
/* Try to merge 'task' with the task following it, '*next_task', in order to
 * elide a copy operation: either a leading copy feeds the next operation's
 * source directly, or a trailing copy is folded into this operation's
 * destination.  On a successful merge the eliminated task is completed
 * immediately, and *next_task may be advanced so the caller's safe
 * iteration remains valid. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill. Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* The copy's source becomes the next operation's source; the copy
		 * itself is then completed right away. */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
2118 :
2119 : void
2120 52 : spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2121 : spdk_accel_completion_cb cb_fn, void *cb_arg)
2122 : {
2123 52 : struct spdk_accel_task *task, *next;
2124 :
2125 : /* Try to remove any copy operations if possible */
2126 124 : TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2127 109 : if (next == NULL) {
2128 37 : break;
2129 : }
2130 72 : accel_sequence_merge_tasks(seq, task, &next);
2131 : }
2132 :
2133 52 : seq->cb_fn = cb_fn;
2134 52 : seq->cb_arg = cb_arg;
2135 :
2136 52 : accel_process_sequence(seq);
2137 52 : }
2138 :
2139 : void
2140 0 : spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2141 : {
2142 0 : struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2143 : struct spdk_accel_task *task;
2144 :
2145 0 : TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2146 :
2147 0 : while (!TAILQ_EMPTY(&tasks)) {
2148 0 : task = TAILQ_FIRST(&tasks);
2149 0 : TAILQ_REMOVE(&tasks, task, seq_link);
2150 0 : TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2151 : }
2152 0 : }
2153 :
2154 : void
2155 3 : spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2156 : {
2157 3 : if (seq == NULL) {
2158 1 : return;
2159 : }
2160 :
2161 2 : accel_sequence_complete_tasks(seq);
2162 2 : accel_sequence_put(seq);
2163 : }
2164 :
/* Return the accel framework's own memory domain (g_accel_domain), used to
 * describe virtual accel buffers. */
struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}
2170 :
2171 : static struct spdk_accel_module_if *
2172 7 : _module_find_by_name(const char *name)
2173 : {
2174 7 : struct spdk_accel_module_if *accel_module = NULL;
2175 :
2176 16 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2177 10 : if (strcmp(name, accel_module->name) == 0) {
2178 1 : break;
2179 : }
2180 : }
2181 :
2182 7 : return accel_module;
2183 : }
2184 :
2185 : static inline struct spdk_accel_crypto_key *
2186 0 : _accel_crypto_key_get(const char *name)
2187 : {
2188 : struct spdk_accel_crypto_key *key;
2189 :
2190 0 : assert(spdk_spin_held(&g_keyring_spin));
2191 :
2192 0 : TAILQ_FOREACH(key, &g_keyring, link) {
2193 0 : if (strcmp(name, key->param.key_name) == 0) {
2194 0 : return key;
2195 : }
2196 : }
2197 :
2198 0 : return NULL;
2199 : }
2200 :
2201 : static void
2202 0 : accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2203 : {
2204 0 : if (key->param.hex_key) {
2205 0 : spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2206 0 : free(key->param.hex_key);
2207 : }
2208 0 : if (key->param.hex_key2) {
2209 0 : spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2210 0 : free(key->param.hex_key2);
2211 : }
2212 0 : free(key->param.tweak_mode);
2213 0 : free(key->param.key_name);
2214 0 : free(key->param.cipher);
2215 0 : if (key->key) {
2216 0 : spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2217 0 : free(key->key);
2218 : }
2219 0 : if (key->key2) {
2220 0 : spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2221 0 : free(key->key2);
2222 : }
2223 0 : free(key);
2224 0 : }
2225 :
/* Deinitialize a key in its owning module and free all its memory.
 * "Unsafe" because the caller is responsible for having already removed
 * the key from g_keyring (no locking is done here). */
static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}
2235 :
2236 : /*
2237 : * This function mitigates a timing side channel which could be caused by using strcmp()
2238 : * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2239 : * the article [1] for more details
2240 : * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2241 : */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	/* 'diff' is volatile and accumulated with bitwise ops so the comparison
	 * runs in time dependent only on the key lengths, never on where the
	 * keys first differ. */
	volatile size_t diff = k1_len ^ k2_len;
	size_t idx;

	/* Note: '&' (not '&&') keeps the loop condition branch-free on the data */
	for (idx = 0; (idx < k1_len) & (idx < k2_len); idx++) {
		diff |= k1[idx] ^ k2[idx];
	}

	return diff == 0;
}
2254 :
/* Printable names for the crypto tweak modes, indexed by the
 * SPDK_ACCEL_CRYPTO_TWEAK_MODE_* enum values; used both for parsing
 * user-supplied mode strings and for error messages. */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

/* Printable cipher names, indexed by the SPDK_ACCEL_CIPHER_* enum values. */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2266 :
/* Create and register a crypto key described by 'param'.  The cipher, keys
 * (hex-encoded) and optional tweak mode are parsed and validated, the key is
 * initialized by the module assigned to the ENCRYPT opcode, and finally
 * inserted into the global keyring under its unique name.  Returns 0 on
 * success or a negative errno (-EINVAL, -ENOMEM, -ENOENT, -ENOTSUP,
 * -EEXIST); on failure all partially allocated key memory is wiped and
 * freed. */
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	/* Map the cipher name to its enum index.
	 * NOTE(review): strncmp() with strlen(g_ciphers[i]) accepts any string
	 * that merely starts with a known cipher name (e.g. "AES_CBC_FOO") —
	 * confirm whether an exact match was intended. */
	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	/* Validate key1's hex length and decode it; key_size is the raw
	 * (binary) size, i.e. half the hex length. */
	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	/* key2 is optional here; whether it is required depends on the cipher
	 * and is verified further below. */
	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	/* A module without a crypto_supports_tweak_mode callback is assumed to
	 * support only the default mode. */
	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size,
				    key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		/* Identical XTS keys are rejected (constant-time comparison). */
		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	/* Register the key under the keyring lock; the name must be unique. */
	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
2483 :
2484 : int
2485 0 : spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2486 : {
2487 0 : if (!key || !key->module_if) {
2488 0 : return -EINVAL;
2489 : }
2490 :
2491 0 : spdk_spin_lock(&g_keyring_spin);
2492 0 : if (!_accel_crypto_key_get(key->param.key_name)) {
2493 0 : spdk_spin_unlock(&g_keyring_spin);
2494 0 : return -ENOENT;
2495 : }
2496 0 : TAILQ_REMOVE(&g_keyring, key, link);
2497 0 : spdk_spin_unlock(&g_keyring_spin);
2498 :
2499 0 : accel_crypto_key_destroy_unsafe(key);
2500 :
2501 0 : return 0;
2502 : }
2503 :
2504 : struct spdk_accel_crypto_key *
2505 0 : spdk_accel_crypto_key_get(const char *name)
2506 : {
2507 : struct spdk_accel_crypto_key *key;
2508 :
2509 0 : spdk_spin_lock(&g_keyring_spin);
2510 0 : key = _accel_crypto_key_get(name);
2511 0 : spdk_spin_unlock(&g_keyring_spin);
2512 :
2513 0 : return key;
2514 : }
2515 :
2516 : /* Helper function when accel modules register with the framework. */
2517 : void
2518 5 : spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2519 : {
2520 : struct spdk_accel_module_if *tmp;
2521 :
2522 5 : if (_module_find_by_name(accel_module->name)) {
2523 0 : SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2524 0 : assert(false);
2525 : return;
2526 : }
2527 :
2528 8 : TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2529 5 : if (accel_module->priority < tmp->priority) {
2530 2 : break;
2531 : }
2532 : }
2533 :
2534 5 : if (tmp != NULL) {
2535 2 : TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2536 : } else {
2537 3 : TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2538 : }
2539 : }
2540 :
2541 : /* Framework level channel create callback. */
/* Framework level channel create callback.  Allocates the per-channel pools
 * (tasks, sequences, auxiliary task data, accel buffers), acquires an I/O
 * channel from the module assigned to each opcode (and from the driver, if
 * one is registered), and initializes the channel's iobuf cache.  Returns 0
 * on success or -ENOMEM, releasing everything acquired so far. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	/* Task slots are sized for the largest registered module context,
	 * rounded up to a cache line. */
	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
				   g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
				  g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	/* Carve the pool bases into individual objects and link them into
	 * their free lists. */
	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	/* Unwind: on failure inside the module loop, 'i' is the failing index,
	 * so module_ch[0..i-1] are the channels that were actually acquired. */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
2643 :
2644 : static void
2645 11 : accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2646 : {
2647 : int i;
2648 :
2649 11 : total->sequence_executed += stats->sequence_executed;
2650 11 : total->sequence_failed += stats->sequence_failed;
2651 11 : total->sequence_outstanding += stats->sequence_outstanding;
2652 11 : total->task_outstanding += stats->task_outstanding;
2653 11 : total->retry.task += stats->retry.task;
2654 11 : total->retry.sequence += stats->retry.sequence;
2655 11 : total->retry.iobuf += stats->retry.iobuf;
2656 11 : total->retry.bufdesc += stats->retry.bufdesc;
2657 176 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2658 165 : total->operations[i].executed += stats->operations[i].executed;
2659 165 : total->operations[i].failed += stats->operations[i].failed;
2660 165 : total->operations[i].num_bytes += stats->operations[i].num_bytes;
2661 : }
2662 11 : }
2663 :
2664 : /* Framework level channel destroy callback. */
2665 : static void
2666 11 : accel_destroy_channel(void *io_device, void *ctx_buf)
2667 : {
2668 11 : struct accel_io_channel *accel_ch = ctx_buf;
2669 : int i;
2670 :
2671 11 : spdk_iobuf_channel_fini(&accel_ch->iobuf);
2672 :
2673 11 : if (accel_ch->driver_channel != NULL) {
2674 0 : spdk_put_io_channel(accel_ch->driver_channel);
2675 : }
2676 :
2677 176 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2678 165 : assert(accel_ch->module_ch[i] != NULL);
2679 165 : spdk_put_io_channel(accel_ch->module_ch[i]);
2680 165 : accel_ch->module_ch[i] = NULL;
2681 : }
2682 :
2683 : /* Update global stats to make sure channel's stats aren't lost after a channel is gone */
2684 11 : spdk_spin_lock(&g_stats_lock);
2685 11 : accel_add_stats(&g_stats, &accel_ch->stats);
2686 11 : spdk_spin_unlock(&g_stats_lock);
2687 :
2688 11 : free(accel_ch->task_pool_base);
2689 11 : free(accel_ch->task_aux_data_base);
2690 11 : free(accel_ch->seq_pool_base);
2691 11 : free(accel_ch->buf_pool_base);
2692 11 : }
2693 :
/* Get an accel framework I/O channel.  The address of the module list is
 * used as the io_device identifier (see the registration comment near
 * spdk_accel_initialize()). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}
2699 :
2700 : static int
2701 2 : accel_module_initialize(void)
2702 : {
2703 : struct spdk_accel_module_if *accel_module, *tmp_module;
2704 2 : int rc = 0, module_rc;
2705 :
2706 7 : TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2707 5 : module_rc = accel_module->module_init();
2708 5 : if (module_rc) {
2709 0 : TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2710 0 : if (module_rc == -ENODEV) {
2711 0 : SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
2712 0 : } else if (!rc) {
2713 0 : SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2714 0 : rc = module_rc;
2715 : }
2716 0 : continue;
2717 : }
2718 :
2719 5 : SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2720 : }
2721 :
2722 2 : return rc;
2723 : }
2724 :
2725 : static void
2726 30 : accel_module_init_opcode(enum spdk_accel_opcode opcode)
2727 : {
2728 30 : struct accel_module *module = &g_modules_opc[opcode];
2729 30 : struct spdk_accel_module_if *module_if = module->module;
2730 :
2731 30 : if (module_if->get_memory_domains != NULL) {
2732 0 : module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2733 : }
2734 30 : }
2735 :
2736 : static int
2737 0 : accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
2738 : struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
2739 : void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
2740 : {
2741 0 : struct accel_buffer *buf = src_domain_ctx;
2742 :
2743 0 : SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);
2744 :
2745 0 : assert(g_accel_domain == src_domain);
2746 0 : assert(spdk_memory_domain_get_system_domain() == dst_domain);
2747 0 : assert(buf->buf == NULL);
2748 0 : assert(addr == ACCEL_BUFFER_BASE);
2749 0 : assert(len == buf->len);
2750 :
2751 0 : buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
2752 0 : if (spdk_unlikely(buf->buf == NULL)) {
2753 0 : return -ENOMEM;
2754 : }
2755 :
2756 0 : result->iov_count = 1;
2757 0 : result->iov.iov_base = buf->buf;
2758 0 : result->iov.iov_len = buf->len;
2759 0 : SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
2760 0 : return 0;
2761 : }
2762 :
2763 : static void
2764 0 : accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
2765 : struct iovec *iov, uint32_t iovcnt)
2766 : {
2767 0 : struct accel_buffer *buf = domain_ctx;
2768 :
2769 0 : SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);
2770 :
2771 0 : assert(g_accel_domain == domain);
2772 0 : assert(iovcnt == 1);
2773 0 : assert(buf->buf != NULL);
2774 0 : assert(iov[0].iov_base == buf->buf);
2775 0 : assert(iov[0].iov_len == buf->len);
2776 :
2777 0 : spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
2778 0 : buf->buf = NULL;
2779 0 : }
2780 :
/* Initialize the accel framework: register the io_device, create the accel
 * memory domain, initialize all registered modules and the optional driver,
 * assign a module to every opcode (honoring RPC overrides), and register the
 * "accel" iobuf module.  Returns 0 on success or a negative errno.
 * NOTE(review): on mid-function failure the already-registered io_device and
 * memory domain are left for the normal finish path to tear down. */
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	/* Hook up the callbacks that back/release accel buffers with iobufs. */
	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the end
	 * of the list. The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 * Later (higher-priority) modules overwrite earlier assignments. */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		/* Track the largest per-task context any module needs. */
		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	/* Encrypt/decrypt must live in the same module (e.g. so keys created
	 * by one are usable by the other). */
	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	/* Every opcode must be covered by now (software fallback at minimum). */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
2877 :
2878 : static void
2879 2 : accel_module_finish_cb(void)
2880 : {
2881 2 : spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2882 :
2883 2 : cb_fn(g_fini_cb_arg);
2884 2 : g_fini_cb_fn = NULL;
2885 2 : g_fini_cb_arg = NULL;
2886 2 : }
2887 :
/* Emit one "accel_assign_opc" RPC object for a single overridden opcode so
 * that replaying the config JSON restores the opcode->module override. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2900 :
/* Write a crypto key's parameters (name, cipher, hex keys, optional tweak
 * mode) as named members of the currently-open JSON object.  The caller is
 * responsible for object_begin/object_end framing. */
static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	/* key2 and tweak_mode are optional; emit only when present. */
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}
2915 :
/* Dump a single crypto key as a self-contained JSON object (used for
 * informational RPC output rather than config replay). */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2923 :
/* Emit an "accel_crypto_key_create" RPC object for one key so that replaying
 * the config JSON recreates it. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2935 :
/* Emit an "accel_set_options" RPC object capturing the current global accel
 * options (cache sizes and pool counts) for config replay. */
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2950 :
2951 : static void
2952 0 : _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2953 : {
2954 : struct spdk_accel_crypto_key *key;
2955 :
2956 0 : spdk_spin_lock(&g_keyring_spin);
2957 0 : TAILQ_FOREACH(key, &g_keyring, link) {
2958 0 : if (full_dump) {
2959 0 : _accel_crypto_key_write_config_json(w, key);
2960 : } else {
2961 0 : _accel_crypto_key_dump_param(w, key);
2962 : }
2963 : }
2964 0 : spdk_spin_unlock(&g_keyring_spin);
2965 0 : }
2966 :
/* Dump all crypto keys in informational (non-replayable) form. */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
2972 :
2973 : void
2974 0 : spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2975 : {
2976 : struct spdk_accel_module_if *accel_module;
2977 : int i;
2978 :
2979 0 : spdk_json_write_array_begin(w);
2980 0 : accel_write_options(w);
2981 :
2982 0 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2983 0 : if (accel_module->write_config_json) {
2984 0 : accel_module->write_config_json(w);
2985 : }
2986 : }
2987 0 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2988 0 : if (g_modules_opc_override[i]) {
2989 0 : accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2990 : }
2991 : }
2992 :
2993 0 : _accel_crypto_keys_write_config_json(w, true);
2994 :
2995 0 : spdk_json_write_array_end(w);
2996 0 : }
2997 :
/* Finish accel modules one at a time, using g_accel_module as the iteration
 * cursor.  Each call advances to the next module; modules with a module_fini
 * callback are finished asynchronously via a thread message (module_fini is
 * expected to re-enter this function when done), others are skipped by direct
 * recursion.  Once the list is exhausted, tear down the driver, spinlocks and
 * memory domain, then invoke the user's finish callback. */
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		/* First call: start at the head of the module list. */
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		/* All modules finished: release framework-level resources. */
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		/* Defer to a message so module_fini runs from thread context. */
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		/* No fini hook: advance immediately to the next module. */
		spdk_accel_module_finish();
	}
}
3028 :
3029 : static void
3030 2 : accel_io_device_unregister_cb(void *io_device)
3031 : {
3032 : struct spdk_accel_crypto_key *key, *key_tmp;
3033 : enum spdk_accel_opcode op;
3034 :
3035 2 : spdk_spin_lock(&g_keyring_spin);
3036 2 : TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3037 0 : accel_crypto_key_destroy_unsafe(key);
3038 : }
3039 2 : spdk_spin_unlock(&g_keyring_spin);
3040 :
3041 32 : for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3042 30 : if (g_modules_opc_override[op] != NULL) {
3043 0 : free(g_modules_opc_override[op]);
3044 0 : g_modules_opc_override[op] = NULL;
3045 : }
3046 30 : g_modules_opc[op].module = NULL;
3047 : }
3048 :
3049 2 : spdk_accel_module_finish();
3050 2 : }
3051 :
3052 : void
3053 2 : spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
3054 : {
3055 2 : assert(cb_fn != NULL);
3056 :
3057 2 : g_fini_cb_fn = cb_fn;
3058 2 : g_fini_cb_arg = cb_arg;
3059 :
3060 2 : spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3061 2 : }
3062 :
3063 : static struct spdk_accel_driver *
3064 2 : accel_find_driver(const char *name)
3065 : {
3066 : struct spdk_accel_driver *driver;
3067 :
3068 2 : TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3069 1 : if (strcmp(driver->name, name) == 0) {
3070 1 : return driver;
3071 : }
3072 : }
3073 :
3074 1 : return NULL;
3075 : }
3076 :
3077 : int
3078 1 : spdk_accel_set_driver(const char *name)
3079 : {
3080 : struct spdk_accel_driver *driver;
3081 :
3082 1 : driver = accel_find_driver(name);
3083 1 : if (driver == NULL) {
3084 0 : SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3085 0 : return -ENODEV;
3086 : }
3087 :
3088 1 : g_accel_driver = driver;
3089 :
3090 1 : return 0;
3091 : }
3092 :
3093 : const char *
3094 0 : spdk_accel_get_driver_name(void)
3095 : {
3096 0 : if (!g_accel_driver) {
3097 0 : return NULL;
3098 : }
3099 :
3100 0 : return g_accel_driver->name;
3101 : }
3102 :
3103 : void
3104 1 : spdk_accel_driver_register(struct spdk_accel_driver *driver)
3105 : {
3106 1 : if (accel_find_driver(driver->name)) {
3107 0 : SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3108 0 : assert(0);
3109 : return;
3110 : }
3111 :
3112 1 : TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3113 : }
3114 :
/* Apply user-supplied accel options.  Only fields that fit within the
 * caller's opts_size are copied, which keeps old callers compiled against a
 * smaller struct working.  Returns 0 on success, -1 for NULL/zero-size opts,
 * or -EINVAL when task_count is below the per-sequence minimum. */
int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (!opts) {
		SPDK_ERRLOG("opts cannot be NULL\n");
		return -1;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
		return -1;
	}

	/* A sequence may hold up to ACCEL_TASKS_IN_SEQUENCE_LIMIT tasks, so
	 * the task pool must be at least that large.  SPDK_GET_FIELD falls
	 * back to the current value if the caller's struct is too small. */
	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
		return -EINVAL;
	}

/* Copy a field only if it exists within the caller's declared opts_size. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_opts.field = opts->field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

	g_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}
3150 :
/* Copy the current global accel options into the caller's struct.  Only
 * fields that fit within opts_size are written, preserving ABI compatibility
 * with callers built against an older (smaller) struct definition. */
void
spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	opts->opts_size = opts_size;

/* Copy a field only if it exists within the caller's declared opts_size. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_opts.field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

#undef SET_FIELD

	/* Do not remove this statement, you should always update this statement when you adding a new field,
	 * and do not forget to add the SET_FIELD statement for your added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
}
3183 :
/* Context for an asynchronous stats query: accumulates counters while
 * iterating every accel channel, then reports the total via cb_fn. */
struct accel_get_stats_ctx {
	struct accel_stats stats;	/* running aggregate across all channels */
	accel_get_stats_cb cb_fn;	/* completion callback */
	void *cb_arg;			/* opaque argument for cb_fn */
};
3189 :
3190 : static void
3191 0 : accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3192 : {
3193 0 : struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3194 :
3195 0 : ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3196 0 : free(ctx);
3197 0 : }
3198 :
3199 : static void
3200 0 : accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3201 : {
3202 0 : struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3203 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3204 0 : struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3205 :
3206 0 : accel_add_stats(&ctx->stats, &accel_ch->stats);
3207 0 : spdk_for_each_channel_continue(iter, 0);
3208 0 : }
3209 :
3210 : int
3211 0 : accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3212 : {
3213 : struct accel_get_stats_ctx *ctx;
3214 :
3215 0 : ctx = calloc(1, sizeof(*ctx));
3216 0 : if (ctx == NULL) {
3217 0 : return -ENOMEM;
3218 : }
3219 :
3220 0 : spdk_spin_lock(&g_stats_lock);
3221 0 : accel_add_stats(&ctx->stats, &g_stats);
3222 0 : spdk_spin_unlock(&g_stats_lock);
3223 :
3224 0 : ctx->cb_fn = cb_fn;
3225 0 : ctx->cb_arg = cb_arg;
3226 :
3227 0 : spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3228 : accel_get_channel_stats_done);
3229 :
3230 0 : return 0;
3231 : }
3232 :
/* Copy per-opcode statistics for one channel into the caller's struct.  Only
 * fields that fit within the caller's declared size are written, so callers
 * built against an older (smaller) stats struct remain compatible. */
void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

/* True when the field lies fully within the caller's declared size. */
#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}
3254 :
3255 : uint8_t
3256 0 : spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3257 : const struct spdk_accel_operation_exec_ctx *ctx)
3258 : {
3259 0 : struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3260 0 : struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3261 :
3262 0 : if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3263 0 : g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3264 : }
3265 :
3266 0 : if (module->get_operation_info != NULL) {
3267 0 : module->get_operation_info(opcode, ctx, &modinfo);
3268 : }
3269 :
3270 : /* If a driver is set, it'll execute most of the operations, while the rest will usually
3271 : * fall back to accel_sw, which doesn't have any alignment requiremenets. However, to be
3272 : * extra safe, return the max(driver, module) if a driver delegates some operations to a
3273 : * hardware module. */
3274 0 : return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3275 : }
3276 :
3277 : struct spdk_accel_module_if *
3278 0 : spdk_accel_get_module(const char *name)
3279 : {
3280 : struct spdk_accel_module_if *module;
3281 :
3282 0 : TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3283 0 : if (strcmp(module->name, name) == 0) {
3284 0 : return module;
3285 : }
3286 : }
3287 :
3288 0 : return NULL;
3289 : }
3290 :
3291 : int
3292 0 : spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3293 : struct spdk_memory_domain **domains,
3294 : int array_size)
3295 : {
3296 0 : assert(opcode < SPDK_ACCEL_OPC_LAST);
3297 :
3298 0 : if (g_modules_opc[opcode].module->get_memory_domains) {
3299 0 : return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3300 : }
3301 :
3302 0 : return 0;
3303 : }
3304 :
3305 1 : SPDK_LOG_REGISTER_COMPONENT(accel)
|