Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2020 Intel Corporation.
3 : * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4 : * All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/accel_module.h"
10 :
11 : #include "accel_internal.h"
12 :
13 : #include "spdk/dma.h"
14 : #include "spdk/env.h"
15 : #include "spdk/likely.h"
16 : #include "spdk/log.h"
17 : #include "spdk/thread.h"
18 : #include "spdk/json.h"
19 : #include "spdk/crc32.h"
20 : #include "spdk/util.h"
21 : #include "spdk/hexlify.h"
22 : #include "spdk/string.h"
23 :
24 : /* Accelerator Framework: The following provides a top level
25 : * generic API for the accelerator functions defined here. Modules,
26 : * such as the one in /module/accel/ioat, supply the implementation
27 : * with the exception of the pure software implementation contained
28 : * later in this file.
29 : */
30 :
/* 4 KiB alignment required by some operations (e.g. dualcast destinations) */
#define ALIGN_4K 0x1000
/* Default sizing for the per-channel task/sequence/buffer pools */
#define MAX_TASKS_PER_CHANNEL 0x800
/* Default per-channel iobuf cache sizes */
#define ACCEL_SMALL_CACHE_SIZE 128
#define ACCEL_LARGE_CACHE_SIZE 16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE ((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK ((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

/* Module assigned to an opcode, plus whether it supports memory domains */
struct accel_module {
	struct spdk_accel_module_if *module;
	bool supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
/* User-requested per-opcode module names (see spdk_accel_assign_opc) */
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
/* Framework-wide tunables; pool sizes default to MAX_TASKS_PER_CHANNEL */
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

/* Printable opcode names, indexed by enum spdk_accel_opcode */
static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy"
};
83 :
/* States of the per-sequence task-processing state machine.  The CHECK/AWAIT
 * pairs bracket operations that may complete asynchronously (buffer
 * allocation, memory-domain pull/push, task execution). */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

/* Human-readable names for accel_sequence_state; only referenced by debug
 * logging, hence the unused attribute for non-debug builds. */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Map a state to its printable name; out-of-range values yield "unknown" */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
130 :
/* Buffer descriptor handed out by a channel's buf_pool.  The backing memory
 * (buf) starts out NULL and is attached later from the iobuf pool (see
 * accel_get_buf/accel_put_buf). */
struct accel_buffer {
	struct spdk_accel_sequence *seq;	/* owning sequence, if any */
	void *buf;				/* backing iobuf memory; NULL until attached */
	uint64_t len;				/* requested length in bytes */
	struct spdk_iobuf_entry iobuf;		/* entry used while waiting for iobuf memory */
	spdk_accel_sequence_get_buf_cb cb_fn;	/* invoked once the buffer becomes available */
	void *cb_ctx;
	SLIST_ENTRY(accel_buffer) link;
	struct accel_io_channel *ch;		/* channel whose pool owns this descriptor */
};

/* Per-thread accel channel: one module channel per opcode plus free pools for
 * tasks, sequences, buffer descriptors, and aux iov storage. */
struct accel_io_channel {
	struct spdk_io_channel *module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel *driver_channel;
	void *task_pool_base;
	struct spdk_accel_sequence *seq_pool_base;
	struct accel_buffer *buf_pool_base;
	struct spdk_accel_task_aux_data *task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task) task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data) task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence) seq_pool;
	SLIST_HEAD(, accel_buffer) buf_pool;
	struct spdk_iobuf_channel iobuf;
	struct accel_stats stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* Chain of accel tasks executed in order; kept to exactly 64 bytes (see the
 * static assert below) so the per-channel sequence pool stays compact. */
struct spdk_accel_sequence {
	struct accel_io_channel *ch;
	struct accel_sequence_tasks tasks;
	SLIST_HEAD(, accel_buffer) bounce_bufs;
	int status;
	/* state uses enum accel_sequence_state */
	uint8_t state;
	bool in_process_sequence;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
	SLIST_ENTRY(spdk_accel_sequence) link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

/* Bump a per-channel statistic by v */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Bump a per-opcode statistic for the channel owning the given task */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void accel_sequence_task_cb(void *cb_arg, int status);
182 :
183 : static inline void
184 709 : accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
185 : {
186 709 : SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
187 : ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
188 709 : assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
189 709 : seq->state = state;
190 709 : }
191 :
192 : static void
193 9 : accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
194 : {
195 9 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
196 9 : assert(status != 0);
197 9 : seq->status = status;
198 9 : }
199 :
200 : int
201 15 : spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
202 : {
203 15 : if (opcode >= SPDK_ACCEL_OPC_LAST) {
204 : /* invalid opcode */
205 0 : return -EINVAL;
206 : }
207 :
208 15 : if (g_modules_opc[opcode].module) {
209 15 : *module_name = g_modules_opc[opcode].module->name;
210 : } else {
211 0 : return -ENOENT;
212 : }
213 :
214 15 : return 0;
215 : }
216 :
217 : void
218 0 : _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
219 : {
220 : struct spdk_accel_module_if *accel_module;
221 : enum spdk_accel_opcode opcode;
222 0 : int j = 0;
223 :
224 0 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
225 0 : for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
226 0 : if (accel_module->supports_opcode(opcode)) {
227 0 : info->ops[j] = opcode;
228 0 : j++;
229 : }
230 : }
231 0 : info->name = accel_module->name;
232 0 : info->num_ops = j;
233 0 : fn(info);
234 0 : j = 0;
235 : }
236 0 : }
237 :
238 : const char *
239 0 : spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
240 : {
241 0 : if (opcode < SPDK_ACCEL_OPC_LAST) {
242 0 : return g_opcode_strings[opcode];
243 : }
244 :
245 0 : return NULL;
246 : }
247 :
248 : int
249 0 : spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
250 : {
251 : char *copy;
252 :
253 0 : if (g_modules_started == true) {
254 : /* we don't allow re-assignment once things have started */
255 0 : return -EINVAL;
256 : }
257 :
258 0 : if (opcode >= SPDK_ACCEL_OPC_LAST) {
259 : /* invalid opcode */
260 0 : return -EINVAL;
261 : }
262 :
263 0 : copy = strdup(name);
264 0 : if (copy == NULL) {
265 0 : return -ENOMEM;
266 : }
267 :
268 : /* module selection will be validated after the framework starts. */
269 0 : free(g_modules_opc_override[opcode]);
270 0 : g_modules_opc_override[opcode] = copy;
271 :
272 0 : return 0;
273 : }
274 :
/* Complete an accel task: update per-channel statistics, then either hand the
 * completion to the owning sequence (when the task is part of one) or invoke
 * the user callback and recycle the task into the channel's free pool. */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	/* Tasks belonging to a sequence are completed through the sequence
	 * state machine, not through the user's callback. */
	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	/* Return any aux iov storage to its pool before recycling the task. */
	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	STAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}
310 :
311 : inline static struct spdk_accel_task *
312 151 : _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
313 : {
314 : struct spdk_accel_task *accel_task;
315 :
316 151 : accel_task = STAILQ_FIRST(&accel_ch->task_pool);
317 151 : if (spdk_unlikely(accel_task == NULL)) {
318 11 : accel_update_stats(accel_ch, retry.task, 1);
319 11 : return NULL;
320 : }
321 :
322 140 : STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
323 140 : accel_task->link.stqe_next = NULL;
324 :
325 140 : accel_task->cb_fn = cb_fn;
326 140 : accel_task->cb_arg = cb_arg;
327 140 : accel_task->accel_ch = accel_ch;
328 140 : accel_task->s.iovs = NULL;
329 140 : accel_task->d.iovs = NULL;
330 :
331 140 : return accel_task;
332 : }
333 :
334 : static inline int
335 92 : accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
336 : {
337 92 : struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
338 92 : struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
339 : int rc;
340 :
341 92 : rc = module->submit_tasks(module_ch, task);
342 92 : if (spdk_unlikely(rc != 0)) {
343 2 : accel_update_task_stats(accel_ch, task, failed, 1);
344 : }
345 :
346 92 : return rc;
347 : }
348 :
/* Sum the lengths of all entries in an iovec array. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov = iovs;
	struct iovec *end = iovs + iovcnt;
	uint64_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}

	return total;
}
361 :
/* Grab an aux-data entry from the task's channel pool and attach it to the
 * task.  NOTE: on pool exhaustion the task is returned to the task pool and
 * the macro makes the ENCLOSING function return -ENOMEM (embedded return), so
 * it may only be used before any other resources are claimed. */
#define ACCEL_TASK_ALLOC_AUX_BUF(task)	\
do {	\
	(task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
	if (spdk_unlikely(!(task)->aux)) {	\
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
		STAILQ_INSERT_HEAD(&(task)->accel_ch->task_pool, (task), link);	\
		assert(0);	\
		return -ENOMEM;	\
	}	\
	SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);	\
	(task)->has_aux = true;	\
} while (0)
374 :
375 : /* Accel framework public API for copy function */
376 : int
377 2 : spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
378 : uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
379 : {
380 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
381 : struct spdk_accel_task *accel_task;
382 :
383 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
384 2 : if (spdk_unlikely(accel_task == NULL)) {
385 1 : return -ENOMEM;
386 : }
387 :
388 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
389 :
390 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
391 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
392 1 : accel_task->d.iovs[0].iov_base = dst;
393 1 : accel_task->d.iovs[0].iov_len = nbytes;
394 1 : accel_task->d.iovcnt = 1;
395 1 : accel_task->s.iovs[0].iov_base = src;
396 1 : accel_task->s.iovs[0].iov_len = nbytes;
397 1 : accel_task->s.iovcnt = 1;
398 1 : accel_task->nbytes = nbytes;
399 1 : accel_task->op_code = SPDK_ACCEL_OPC_COPY;
400 1 : accel_task->src_domain = NULL;
401 1 : accel_task->dst_domain = NULL;
402 :
403 1 : return accel_submit_task(accel_ch, accel_task);
404 : }
405 :
406 : /* Accel framework public API for dual cast copy function */
407 : int
408 4 : spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
409 : void *dst2, void *src, uint64_t nbytes,
410 : spdk_accel_completion_cb cb_fn, void *cb_arg)
411 : {
412 4 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
413 : struct spdk_accel_task *accel_task;
414 :
415 4 : if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
416 2 : SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
417 2 : return -EINVAL;
418 : }
419 :
420 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
421 2 : if (spdk_unlikely(accel_task == NULL)) {
422 1 : return -ENOMEM;
423 : }
424 :
425 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
426 :
427 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
428 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
429 1 : accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
430 1 : accel_task->d.iovs[0].iov_base = dst1;
431 1 : accel_task->d.iovs[0].iov_len = nbytes;
432 1 : accel_task->d.iovcnt = 1;
433 1 : accel_task->d2.iovs[0].iov_base = dst2;
434 1 : accel_task->d2.iovs[0].iov_len = nbytes;
435 1 : accel_task->d2.iovcnt = 1;
436 1 : accel_task->s.iovs[0].iov_base = src;
437 1 : accel_task->s.iovs[0].iov_len = nbytes;
438 1 : accel_task->s.iovcnt = 1;
439 1 : accel_task->nbytes = nbytes;
440 1 : accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
441 1 : accel_task->src_domain = NULL;
442 1 : accel_task->dst_domain = NULL;
443 :
444 1 : return accel_submit_task(accel_ch, accel_task);
445 : }
446 :
447 : /* Accel framework public API for compare function */
448 :
449 : int
450 2 : spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
451 : void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
452 : void *cb_arg)
453 : {
454 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
455 : struct spdk_accel_task *accel_task;
456 :
457 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
458 2 : if (spdk_unlikely(accel_task == NULL)) {
459 1 : return -ENOMEM;
460 : }
461 :
462 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
463 :
464 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
465 1 : accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
466 1 : accel_task->s.iovs[0].iov_base = src1;
467 1 : accel_task->s.iovs[0].iov_len = nbytes;
468 1 : accel_task->s.iovcnt = 1;
469 1 : accel_task->s2.iovs[0].iov_base = src2;
470 1 : accel_task->s2.iovs[0].iov_len = nbytes;
471 1 : accel_task->s2.iovcnt = 1;
472 1 : accel_task->nbytes = nbytes;
473 1 : accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
474 1 : accel_task->src_domain = NULL;
475 1 : accel_task->dst_domain = NULL;
476 :
477 1 : return accel_submit_task(accel_ch, accel_task);
478 : }
479 :
480 : /* Accel framework public API for fill function */
481 : int
482 2 : spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
483 : uint8_t fill, uint64_t nbytes,
484 : spdk_accel_completion_cb cb_fn, void *cb_arg)
485 : {
486 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
487 : struct spdk_accel_task *accel_task;
488 :
489 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
490 2 : if (spdk_unlikely(accel_task == NULL)) {
491 1 : return -ENOMEM;
492 : }
493 :
494 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
495 :
496 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
497 1 : accel_task->d.iovs[0].iov_base = dst;
498 1 : accel_task->d.iovs[0].iov_len = nbytes;
499 1 : accel_task->d.iovcnt = 1;
500 1 : accel_task->nbytes = nbytes;
501 1 : memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
502 1 : accel_task->op_code = SPDK_ACCEL_OPC_FILL;
503 1 : accel_task->src_domain = NULL;
504 1 : accel_task->dst_domain = NULL;
505 :
506 1 : return accel_submit_task(accel_ch, accel_task);
507 : }
508 :
509 : /* Accel framework public API for CRC-32C function */
510 : int
511 2 : spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
512 : void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
513 : void *cb_arg)
514 : {
515 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
516 : struct spdk_accel_task *accel_task;
517 :
518 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
519 2 : if (spdk_unlikely(accel_task == NULL)) {
520 1 : return -ENOMEM;
521 : }
522 :
523 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
524 :
525 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
526 1 : accel_task->s.iovs[0].iov_base = src;
527 1 : accel_task->s.iovs[0].iov_len = nbytes;
528 1 : accel_task->s.iovcnt = 1;
529 1 : accel_task->nbytes = nbytes;
530 1 : accel_task->crc_dst = crc_dst;
531 1 : accel_task->seed = seed;
532 1 : accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
533 1 : accel_task->src_domain = NULL;
534 1 : accel_task->dst_domain = NULL;
535 :
536 1 : return accel_submit_task(accel_ch, accel_task);
537 : }
538 :
539 : /* Accel framework public API for chained CRC-32C function */
540 : int
541 1 : spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
542 : struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
543 : spdk_accel_completion_cb cb_fn, void *cb_arg)
544 : {
545 1 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
546 : struct spdk_accel_task *accel_task;
547 :
548 1 : if (iov == NULL) {
549 0 : SPDK_ERRLOG("iov should not be NULL");
550 0 : return -EINVAL;
551 : }
552 :
553 1 : if (!iov_cnt) {
554 0 : SPDK_ERRLOG("iovcnt should not be zero value\n");
555 0 : return -EINVAL;
556 : }
557 :
558 1 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
559 1 : if (spdk_unlikely(accel_task == NULL)) {
560 0 : SPDK_ERRLOG("no memory\n");
561 0 : assert(0);
562 : return -ENOMEM;
563 : }
564 :
565 1 : accel_task->s.iovs = iov;
566 1 : accel_task->s.iovcnt = iov_cnt;
567 1 : accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
568 1 : accel_task->crc_dst = crc_dst;
569 1 : accel_task->seed = seed;
570 1 : accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
571 1 : accel_task->src_domain = NULL;
572 1 : accel_task->dst_domain = NULL;
573 :
574 1 : return accel_submit_task(accel_ch, accel_task);
575 : }
576 :
577 : /* Accel framework public API for copy with CRC-32C function */
578 : int
579 2 : spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
580 : void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
581 : spdk_accel_completion_cb cb_fn, void *cb_arg)
582 : {
583 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
584 : struct spdk_accel_task *accel_task;
585 :
586 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
587 2 : if (spdk_unlikely(accel_task == NULL)) {
588 1 : return -ENOMEM;
589 : }
590 :
591 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
592 :
593 1 : accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
594 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
595 1 : accel_task->d.iovs[0].iov_base = dst;
596 1 : accel_task->d.iovs[0].iov_len = nbytes;
597 1 : accel_task->d.iovcnt = 1;
598 1 : accel_task->s.iovs[0].iov_base = src;
599 1 : accel_task->s.iovs[0].iov_len = nbytes;
600 1 : accel_task->s.iovcnt = 1;
601 1 : accel_task->nbytes = nbytes;
602 1 : accel_task->crc_dst = crc_dst;
603 1 : accel_task->seed = seed;
604 1 : accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
605 1 : accel_task->src_domain = NULL;
606 1 : accel_task->dst_domain = NULL;
607 :
608 1 : return accel_submit_task(accel_ch, accel_task);
609 : }
610 :
611 : /* Accel framework public API for chained copy + CRC-32C function */
612 : int
613 0 : spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
614 : struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
615 : uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
616 : {
617 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
618 : struct spdk_accel_task *accel_task;
619 : uint64_t nbytes;
620 :
621 0 : if (src_iovs == NULL) {
622 0 : SPDK_ERRLOG("iov should not be NULL");
623 0 : return -EINVAL;
624 : }
625 :
626 0 : if (!iov_cnt) {
627 0 : SPDK_ERRLOG("iovcnt should not be zero value\n");
628 0 : return -EINVAL;
629 : }
630 :
631 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
632 0 : if (spdk_unlikely(accel_task == NULL)) {
633 0 : SPDK_ERRLOG("no memory\n");
634 0 : assert(0);
635 : return -ENOMEM;
636 : }
637 :
638 0 : nbytes = accel_get_iovlen(src_iovs, iov_cnt);
639 :
640 0 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
641 :
642 0 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
643 0 : accel_task->d.iovs[0].iov_base = dst;
644 0 : accel_task->d.iovs[0].iov_len = nbytes;
645 0 : accel_task->d.iovcnt = 1;
646 0 : accel_task->s.iovs = src_iovs;
647 0 : accel_task->s.iovcnt = iov_cnt;
648 0 : accel_task->nbytes = nbytes;
649 0 : accel_task->crc_dst = crc_dst;
650 0 : accel_task->seed = seed;
651 0 : accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
652 0 : accel_task->src_domain = NULL;
653 0 : accel_task->dst_domain = NULL;
654 :
655 0 : return accel_submit_task(accel_ch, accel_task);
656 : }
657 :
658 : int
659 0 : spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
660 : struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
661 : spdk_accel_completion_cb cb_fn, void *cb_arg)
662 : {
663 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
664 : struct spdk_accel_task *accel_task;
665 :
666 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
667 0 : if (spdk_unlikely(accel_task == NULL)) {
668 0 : return -ENOMEM;
669 : }
670 :
671 0 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
672 :
673 0 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
674 0 : accel_task->d.iovs[0].iov_base = dst;
675 0 : accel_task->d.iovs[0].iov_len = nbytes;
676 0 : accel_task->d.iovcnt = 1;
677 0 : accel_task->output_size = output_size;
678 0 : accel_task->s.iovs = src_iovs;
679 0 : accel_task->s.iovcnt = src_iovcnt;
680 0 : accel_task->nbytes = nbytes;
681 0 : accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
682 0 : accel_task->src_domain = NULL;
683 0 : accel_task->dst_domain = NULL;
684 :
685 0 : return accel_submit_task(accel_ch, accel_task);
686 : }
687 :
688 : int
689 0 : spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
690 : size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
691 : uint32_t *output_size, spdk_accel_completion_cb cb_fn,
692 : void *cb_arg)
693 : {
694 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
695 : struct spdk_accel_task *accel_task;
696 :
697 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
698 0 : if (spdk_unlikely(accel_task == NULL)) {
699 0 : return -ENOMEM;
700 : }
701 :
702 0 : accel_task->output_size = output_size;
703 0 : accel_task->s.iovs = src_iovs;
704 0 : accel_task->s.iovcnt = src_iovcnt;
705 0 : accel_task->d.iovs = dst_iovs;
706 0 : accel_task->d.iovcnt = dst_iovcnt;
707 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
708 0 : accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
709 0 : accel_task->src_domain = NULL;
710 0 : accel_task->dst_domain = NULL;
711 :
712 0 : return accel_submit_task(accel_ch, accel_task);
713 : }
714 :
715 : int
716 0 : spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
717 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
718 : struct iovec *src_iovs, uint32_t src_iovcnt,
719 : uint64_t iv, uint32_t block_size,
720 : spdk_accel_completion_cb cb_fn, void *cb_arg)
721 : {
722 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
723 : struct spdk_accel_task *accel_task;
724 :
725 0 : if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
726 0 : return -EINVAL;
727 : }
728 :
729 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
730 0 : if (spdk_unlikely(accel_task == NULL)) {
731 0 : return -ENOMEM;
732 : }
733 :
734 0 : accel_task->crypto_key = key;
735 0 : accel_task->s.iovs = src_iovs;
736 0 : accel_task->s.iovcnt = src_iovcnt;
737 0 : accel_task->d.iovs = dst_iovs;
738 0 : accel_task->d.iovcnt = dst_iovcnt;
739 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
740 0 : accel_task->iv = iv;
741 0 : accel_task->block_size = block_size;
742 0 : accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
743 0 : accel_task->src_domain = NULL;
744 0 : accel_task->dst_domain = NULL;
745 :
746 0 : return accel_submit_task(accel_ch, accel_task);
747 : }
748 :
749 : int
750 0 : spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
751 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
752 : struct iovec *src_iovs, uint32_t src_iovcnt,
753 : uint64_t iv, uint32_t block_size,
754 : spdk_accel_completion_cb cb_fn, void *cb_arg)
755 : {
756 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
757 : struct spdk_accel_task *accel_task;
758 :
759 0 : if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
760 0 : return -EINVAL;
761 : }
762 :
763 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
764 0 : if (spdk_unlikely(accel_task == NULL)) {
765 0 : return -ENOMEM;
766 : }
767 :
768 0 : accel_task->crypto_key = key;
769 0 : accel_task->s.iovs = src_iovs;
770 0 : accel_task->s.iovcnt = src_iovcnt;
771 0 : accel_task->d.iovs = dst_iovs;
772 0 : accel_task->d.iovcnt = dst_iovcnt;
773 0 : accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
774 0 : accel_task->iv = iv;
775 0 : accel_task->block_size = block_size;
776 0 : accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
777 0 : accel_task->src_domain = NULL;
778 0 : accel_task->dst_domain = NULL;
779 :
780 0 : return accel_submit_task(accel_ch, accel_task);
781 : }
782 :
783 : int
784 2 : spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
785 : uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
786 : {
787 2 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
788 : struct spdk_accel_task *accel_task;
789 :
790 2 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
791 2 : if (spdk_unlikely(accel_task == NULL)) {
792 1 : return -ENOMEM;
793 : }
794 :
795 1 : ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
796 :
797 1 : accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
798 1 : accel_task->nsrcs.srcs = sources;
799 1 : accel_task->nsrcs.cnt = nsrcs;
800 1 : accel_task->d.iovs[0].iov_base = dst;
801 1 : accel_task->d.iovs[0].iov_len = nbytes;
802 1 : accel_task->d.iovcnt = 1;
803 1 : accel_task->nbytes = nbytes;
804 1 : accel_task->op_code = SPDK_ACCEL_OPC_XOR;
805 1 : accel_task->src_domain = NULL;
806 1 : accel_task->dst_domain = NULL;
807 :
808 1 : return accel_submit_task(accel_ch, accel_task);
809 : }
810 :
811 : int
812 0 : spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
813 : struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
814 : const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
815 : spdk_accel_completion_cb cb_fn, void *cb_arg)
816 : {
817 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
818 : struct spdk_accel_task *accel_task;
819 :
820 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
821 0 : if (accel_task == NULL) {
822 0 : return -ENOMEM;
823 : }
824 :
825 0 : accel_task->s.iovs = iovs;
826 0 : accel_task->s.iovcnt = iovcnt;
827 0 : accel_task->dif.ctx = ctx;
828 0 : accel_task->dif.err = err;
829 0 : accel_task->dif.num_blocks = num_blocks;
830 0 : accel_task->nbytes = num_blocks * ctx->block_size;
831 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
832 0 : accel_task->src_domain = NULL;
833 0 : accel_task->dst_domain = NULL;
834 :
835 0 : return accel_submit_task(accel_ch, accel_task);
836 : }
837 :
838 : int
839 0 : spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
840 : struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
841 : const struct spdk_dif_ctx *ctx,
842 : spdk_accel_completion_cb cb_fn, void *cb_arg)
843 : {
844 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
845 : struct spdk_accel_task *accel_task;
846 :
847 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
848 0 : if (accel_task == NULL) {
849 0 : return -ENOMEM;
850 : }
851 :
852 0 : accel_task->s.iovs = iovs;
853 0 : accel_task->s.iovcnt = iovcnt;
854 0 : accel_task->dif.ctx = ctx;
855 0 : accel_task->dif.num_blocks = num_blocks;
856 0 : accel_task->nbytes = num_blocks * ctx->block_size;
857 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
858 0 : accel_task->src_domain = NULL;
859 0 : accel_task->dst_domain = NULL;
860 :
861 0 : return accel_submit_task(accel_ch, accel_task);
862 : }
863 :
864 : int
865 0 : spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
866 : size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
867 : uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
868 : spdk_accel_completion_cb cb_fn, void *cb_arg)
869 : {
870 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
871 : struct spdk_accel_task *accel_task;
872 :
873 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
874 0 : if (accel_task == NULL) {
875 0 : return -ENOMEM;
876 : }
877 :
878 0 : accel_task->s.iovs = src_iovs;
879 0 : accel_task->s.iovcnt = src_iovcnt;
880 0 : accel_task->d.iovs = dst_iovs;
881 0 : accel_task->d.iovcnt = dst_iovcnt;
882 0 : accel_task->dif.ctx = ctx;
883 0 : accel_task->dif.num_blocks = num_blocks;
884 0 : accel_task->nbytes = num_blocks * ctx->block_size;
885 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
886 0 : accel_task->src_domain = NULL;
887 0 : accel_task->dst_domain = NULL;
888 :
889 0 : return accel_submit_task(accel_ch, accel_task);
890 : }
891 :
892 : int
893 0 : spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
894 : struct iovec *dst_iovs, size_t dst_iovcnt,
895 : struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
896 : const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
897 : spdk_accel_completion_cb cb_fn, void *cb_arg)
898 : {
899 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
900 : struct spdk_accel_task *accel_task;
901 :
902 0 : accel_task = _get_task(accel_ch, cb_fn, cb_arg);
903 0 : if (accel_task == NULL) {
904 0 : return -ENOMEM;
905 : }
906 :
907 0 : accel_task->s.iovs = src_iovs;
908 0 : accel_task->s.iovcnt = src_iovcnt;
909 0 : accel_task->d.iovs = dst_iovs;
910 0 : accel_task->d.iovcnt = dst_iovcnt;
911 0 : accel_task->dif.ctx = ctx;
912 0 : accel_task->dif.err = err;
913 0 : accel_task->dif.num_blocks = num_blocks;
914 0 : accel_task->nbytes = num_blocks * ctx->block_size;
915 0 : accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
916 0 : accel_task->src_domain = NULL;
917 0 : accel_task->dst_domain = NULL;
918 :
919 0 : return accel_submit_task(accel_ch, accel_task);
920 : }
921 :
922 : static inline struct accel_buffer *
923 33 : accel_get_buf(struct accel_io_channel *ch, uint64_t len)
924 : {
925 : struct accel_buffer *buf;
926 :
927 33 : buf = SLIST_FIRST(&ch->buf_pool);
928 33 : if (spdk_unlikely(buf == NULL)) {
929 0 : accel_update_stats(ch, retry.bufdesc, 1);
930 0 : return NULL;
931 : }
932 :
933 33 : SLIST_REMOVE_HEAD(&ch->buf_pool, link);
934 33 : buf->len = len;
935 33 : buf->buf = NULL;
936 33 : buf->seq = NULL;
937 33 : buf->cb_fn = NULL;
938 :
939 33 : return buf;
940 : }
941 :
942 : static inline void
943 33 : accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
944 : {
945 33 : if (buf->buf != NULL) {
946 29 : spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
947 : }
948 :
949 33 : SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
950 33 : }
951 :
952 : static inline struct spdk_accel_sequence *
953 60 : accel_sequence_get(struct accel_io_channel *ch)
954 : {
955 : struct spdk_accel_sequence *seq;
956 :
957 60 : seq = SLIST_FIRST(&ch->seq_pool);
958 60 : if (spdk_unlikely(seq == NULL)) {
959 3 : accel_update_stats(ch, retry.sequence, 1);
960 3 : return NULL;
961 : }
962 :
963 57 : SLIST_REMOVE_HEAD(&ch->seq_pool, link);
964 :
965 57 : TAILQ_INIT(&seq->tasks);
966 57 : SLIST_INIT(&seq->bounce_bufs);
967 :
968 57 : seq->ch = ch;
969 57 : seq->status = 0;
970 57 : seq->state = ACCEL_SEQUENCE_STATE_INIT;
971 57 : seq->in_process_sequence = false;
972 :
973 57 : return seq;
974 : }
975 :
976 : static inline void
977 57 : accel_sequence_put(struct spdk_accel_sequence *seq)
978 : {
979 57 : struct accel_io_channel *ch = seq->ch;
980 : struct accel_buffer *buf;
981 :
982 76 : while (!SLIST_EMPTY(&seq->bounce_bufs)) {
983 19 : buf = SLIST_FIRST(&seq->bounce_bufs);
984 19 : SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
985 19 : accel_put_buf(seq->ch, buf);
986 : }
987 :
988 57 : assert(TAILQ_EMPTY(&seq->tasks));
989 57 : seq->ch = NULL;
990 :
991 57 : SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
992 57 : }
993 :
994 : static void accel_sequence_task_cb(void *cb_arg, int status);
995 :
996 : static inline struct spdk_accel_task *
997 134 : accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
998 : spdk_accel_step_cb cb_fn, void *cb_arg)
999 : {
1000 : struct spdk_accel_task *task;
1001 :
1002 134 : task = _get_task(ch, NULL, NULL);
1003 134 : if (spdk_unlikely(task == NULL)) {
1004 3 : return task;
1005 : }
1006 :
1007 131 : task->step_cb_fn = cb_fn;
1008 131 : task->cb_arg = cb_arg;
1009 131 : task->seq = seq;
1010 :
1011 131 : return task;
1012 : }
1013 :
1014 : int
1015 35 : spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1016 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1017 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1018 : struct iovec *src_iovs, uint32_t src_iovcnt,
1019 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1020 : spdk_accel_step_cb cb_fn, void *cb_arg)
1021 : {
1022 35 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1023 : struct spdk_accel_task *task;
1024 35 : struct spdk_accel_sequence *seq = *pseq;
1025 :
1026 35 : if (seq == NULL) {
1027 12 : seq = accel_sequence_get(accel_ch);
1028 12 : if (spdk_unlikely(seq == NULL)) {
1029 1 : return -ENOMEM;
1030 : }
1031 : }
1032 :
1033 34 : assert(seq->ch == accel_ch);
1034 34 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1035 34 : if (spdk_unlikely(task == NULL)) {
1036 1 : if (*pseq == NULL) {
1037 1 : accel_sequence_put(seq);
1038 : }
1039 :
1040 1 : return -ENOMEM;
1041 : }
1042 :
1043 33 : task->dst_domain = dst_domain;
1044 33 : task->dst_domain_ctx = dst_domain_ctx;
1045 33 : task->d.iovs = dst_iovs;
1046 33 : task->d.iovcnt = dst_iovcnt;
1047 33 : task->src_domain = src_domain;
1048 33 : task->src_domain_ctx = src_domain_ctx;
1049 33 : task->s.iovs = src_iovs;
1050 33 : task->s.iovcnt = src_iovcnt;
1051 33 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1052 33 : task->op_code = SPDK_ACCEL_OPC_COPY;
1053 :
1054 33 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1055 33 : *pseq = seq;
1056 :
1057 33 : return 0;
1058 : }
1059 :
1060 : int
1061 39 : spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1062 : void *buf, uint64_t len,
1063 : struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1064 : spdk_accel_step_cb cb_fn, void *cb_arg)
1065 : {
1066 39 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1067 : struct spdk_accel_task *task;
1068 39 : struct spdk_accel_sequence *seq = *pseq;
1069 :
1070 39 : if (seq == NULL) {
1071 23 : seq = accel_sequence_get(accel_ch);
1072 23 : if (spdk_unlikely(seq == NULL)) {
1073 1 : return -ENOMEM;
1074 : }
1075 : }
1076 :
1077 38 : assert(seq->ch == accel_ch);
1078 38 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1079 38 : if (spdk_unlikely(task == NULL)) {
1080 1 : if (*pseq == NULL) {
1081 1 : accel_sequence_put(seq);
1082 : }
1083 :
1084 1 : return -ENOMEM;
1085 : }
1086 :
1087 37 : memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1088 :
1089 37 : task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1090 37 : if (spdk_unlikely(!task->aux)) {
1091 0 : SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1092 0 : if (*pseq == NULL) {
1093 0 : accel_sequence_put((seq));
1094 : }
1095 0 : STAILQ_INSERT_HEAD(&task->accel_ch->task_pool, task, link);
1096 0 : task->seq = NULL;
1097 0 : assert(0);
1098 : return -ENOMEM;
1099 : }
1100 37 : SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1101 37 : task->has_aux = true;
1102 :
1103 37 : task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1104 37 : task->d.iovs[0].iov_base = buf;
1105 37 : task->d.iovs[0].iov_len = len;
1106 37 : task->d.iovcnt = 1;
1107 37 : task->nbytes = len;
1108 37 : task->src_domain = NULL;
1109 37 : task->dst_domain = domain;
1110 37 : task->dst_domain_ctx = domain_ctx;
1111 37 : task->op_code = SPDK_ACCEL_OPC_FILL;
1112 :
1113 37 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1114 37 : *pseq = seq;
1115 :
1116 37 : return 0;
1117 : }
1118 :
1119 : int
1120 40 : spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1121 : struct iovec *dst_iovs, size_t dst_iovcnt,
1122 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1123 : struct iovec *src_iovs, size_t src_iovcnt,
1124 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1125 : spdk_accel_step_cb cb_fn, void *cb_arg)
1126 : {
1127 40 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1128 : struct spdk_accel_task *task;
1129 40 : struct spdk_accel_sequence *seq = *pseq;
1130 :
1131 40 : if (seq == NULL) {
1132 17 : seq = accel_sequence_get(accel_ch);
1133 17 : if (spdk_unlikely(seq == NULL)) {
1134 1 : return -ENOMEM;
1135 : }
1136 : }
1137 :
1138 39 : assert(seq->ch == accel_ch);
1139 39 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1140 39 : if (spdk_unlikely(task == NULL)) {
1141 1 : if (*pseq == NULL) {
1142 1 : accel_sequence_put(seq);
1143 : }
1144 :
1145 1 : return -ENOMEM;
1146 : }
1147 :
1148 : /* TODO: support output_size for chaining */
1149 38 : task->output_size = NULL;
1150 38 : task->dst_domain = dst_domain;
1151 38 : task->dst_domain_ctx = dst_domain_ctx;
1152 38 : task->d.iovs = dst_iovs;
1153 38 : task->d.iovcnt = dst_iovcnt;
1154 38 : task->src_domain = src_domain;
1155 38 : task->src_domain_ctx = src_domain_ctx;
1156 38 : task->s.iovs = src_iovs;
1157 38 : task->s.iovcnt = src_iovcnt;
1158 38 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1159 38 : task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1160 :
1161 38 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1162 38 : *pseq = seq;
1163 :
1164 38 : return 0;
1165 : }
1166 :
1167 : int
1168 8 : spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1169 : struct spdk_accel_crypto_key *key,
1170 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1171 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1172 : struct iovec *src_iovs, uint32_t src_iovcnt,
1173 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1174 : uint64_t iv, uint32_t block_size,
1175 : spdk_accel_step_cb cb_fn, void *cb_arg)
1176 : {
1177 8 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1178 : struct spdk_accel_task *task;
1179 8 : struct spdk_accel_sequence *seq = *pseq;
1180 :
1181 8 : assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1182 :
1183 8 : if (seq == NULL) {
1184 6 : seq = accel_sequence_get(accel_ch);
1185 6 : if (spdk_unlikely(seq == NULL)) {
1186 0 : return -ENOMEM;
1187 : }
1188 : }
1189 :
1190 8 : assert(seq->ch == accel_ch);
1191 8 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1192 8 : if (spdk_unlikely(task == NULL)) {
1193 0 : if (*pseq == NULL) {
1194 0 : accel_sequence_put(seq);
1195 : }
1196 :
1197 0 : return -ENOMEM;
1198 : }
1199 :
1200 8 : task->crypto_key = key;
1201 8 : task->src_domain = src_domain;
1202 8 : task->src_domain_ctx = src_domain_ctx;
1203 8 : task->s.iovs = src_iovs;
1204 8 : task->s.iovcnt = src_iovcnt;
1205 8 : task->dst_domain = dst_domain;
1206 8 : task->dst_domain_ctx = dst_domain_ctx;
1207 8 : task->d.iovs = dst_iovs;
1208 8 : task->d.iovcnt = dst_iovcnt;
1209 8 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1210 8 : task->iv = iv;
1211 8 : task->block_size = block_size;
1212 8 : task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1213 :
1214 8 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1215 8 : *pseq = seq;
1216 :
1217 8 : return 0;
1218 : }
1219 :
1220 : int
1221 9 : spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1222 : struct spdk_accel_crypto_key *key,
1223 : struct iovec *dst_iovs, uint32_t dst_iovcnt,
1224 : struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1225 : struct iovec *src_iovs, uint32_t src_iovcnt,
1226 : struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1227 : uint64_t iv, uint32_t block_size,
1228 : spdk_accel_step_cb cb_fn, void *cb_arg)
1229 : {
1230 9 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1231 : struct spdk_accel_task *task;
1232 9 : struct spdk_accel_sequence *seq = *pseq;
1233 :
1234 9 : assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1235 :
1236 9 : if (seq == NULL) {
1237 0 : seq = accel_sequence_get(accel_ch);
1238 0 : if (spdk_unlikely(seq == NULL)) {
1239 0 : return -ENOMEM;
1240 : }
1241 : }
1242 :
1243 9 : assert(seq->ch == accel_ch);
1244 9 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1245 9 : if (spdk_unlikely(task == NULL)) {
1246 0 : if (*pseq == NULL) {
1247 0 : accel_sequence_put(seq);
1248 : }
1249 :
1250 0 : return -ENOMEM;
1251 : }
1252 :
1253 9 : task->crypto_key = key;
1254 9 : task->src_domain = src_domain;
1255 9 : task->src_domain_ctx = src_domain_ctx;
1256 9 : task->s.iovs = src_iovs;
1257 9 : task->s.iovcnt = src_iovcnt;
1258 9 : task->dst_domain = dst_domain;
1259 9 : task->dst_domain_ctx = dst_domain_ctx;
1260 9 : task->d.iovs = dst_iovs;
1261 9 : task->d.iovcnt = dst_iovcnt;
1262 9 : task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1263 9 : task->iv = iv;
1264 9 : task->block_size = block_size;
1265 9 : task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1266 :
1267 9 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1268 9 : *pseq = seq;
1269 :
1270 9 : return 0;
1271 : }
1272 :
1273 : int
1274 6 : spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1275 : uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1276 : struct spdk_memory_domain *domain, void *domain_ctx,
1277 : uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1278 : {
1279 6 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1280 : struct spdk_accel_task *task;
1281 6 : struct spdk_accel_sequence *seq = *pseq;
1282 :
1283 6 : if (seq == NULL) {
1284 2 : seq = accel_sequence_get(accel_ch);
1285 2 : if (spdk_unlikely(seq == NULL)) {
1286 0 : return -ENOMEM;
1287 : }
1288 : }
1289 :
1290 6 : assert(seq->ch == accel_ch);
1291 6 : task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1292 6 : if (spdk_unlikely(task == NULL)) {
1293 0 : if (*pseq == NULL) {
1294 0 : accel_sequence_put(seq);
1295 : }
1296 :
1297 0 : return -ENOMEM;
1298 : }
1299 :
1300 6 : task->s.iovs = iovs;
1301 6 : task->s.iovcnt = iovcnt;
1302 6 : task->src_domain = domain;
1303 6 : task->src_domain_ctx = domain_ctx;
1304 6 : task->nbytes = accel_get_iovlen(iovs, iovcnt);
1305 6 : task->crc_dst = dst;
1306 6 : task->seed = seed;
1307 6 : task->op_code = SPDK_ACCEL_OPC_CRC32C;
1308 6 : task->dst_domain = NULL;
1309 :
1310 6 : TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1311 6 : *pseq = seq;
1312 :
1313 6 : return 0;
1314 : }
1315 :
1316 : int
1317 14 : spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1318 : struct spdk_memory_domain **domain, void **domain_ctx)
1319 : {
1320 14 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1321 : struct accel_buffer *accel_buf;
1322 :
1323 14 : accel_buf = accel_get_buf(accel_ch, len);
1324 14 : if (spdk_unlikely(accel_buf == NULL)) {
1325 0 : return -ENOMEM;
1326 : }
1327 :
1328 14 : accel_buf->ch = accel_ch;
1329 :
1330 : /* We always return the same pointer and identify the buffers through domain_ctx */
1331 14 : *buf = ACCEL_BUFFER_BASE;
1332 14 : *domain_ctx = accel_buf;
1333 14 : *domain = g_accel_domain;
1334 :
1335 14 : return 0;
1336 : }
1337 :
1338 : void
1339 14 : spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1340 : struct spdk_memory_domain *domain, void *domain_ctx)
1341 : {
1342 14 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1343 14 : struct accel_buffer *accel_buf = domain_ctx;
1344 :
1345 14 : assert(domain == g_accel_domain);
1346 14 : assert(buf == ACCEL_BUFFER_BASE);
1347 :
1348 14 : accel_put_buf(accel_ch, accel_buf);
1349 14 : }
1350 :
/* Complete a single step of a sequence: unlink the task, return it (and its
 * aux data) to the channel pools and invoke the per-step callback. */
static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	/* Save the callback before the task is recycled: once it's back in the
	 * pool it may be re-used (e.g. from within the callback itself). */
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}
	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}
1372 :
1373 : static void
1374 54 : accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1375 : {
1376 : struct spdk_accel_task *task;
1377 :
1378 72 : while (!TAILQ_EMPTY(&seq->tasks)) {
1379 18 : task = TAILQ_FIRST(&seq->tasks);
1380 18 : accel_sequence_complete_task(seq, task);
1381 : }
1382 54 : }
1383 :
/* Finish a sequence: update stats, flush per-step callbacks, invoke the
 * user's completion callback and return the sequence to the pool. */
static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	/* Only now is it safe to recycle the sequence object. */
	accel_sequence_put(seq);
}
1402 :
1403 : static void
1404 28 : accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1405 : {
1406 : uintptr_t offset;
1407 :
1408 28 : offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1409 28 : assert(offset < accel_buf->len);
1410 :
1411 28 : diov->iov_base = (char *)accel_buf->buf + offset;
1412 28 : diov->iov_len = siov->iov_len;
1413 28 : }
1414 :
/* After the real data buffer behind an accel-domain virtual buffer has been
 * allocated, rewrite every task in the sequence that referenced it so the
 * tasks point at real memory (and no longer at the accel domain). */
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			/* The translated iovec lives in the task's aux data; grab an
			 * aux entry if this task doesn't hold one yet. */
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			/* The source now points at real memory. */
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			/* The destination now points at real memory. */
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}
1455 :
1456 : static void accel_process_sequence(struct spdk_accel_sequence *seq);
1457 :
1458 : static void
1459 3 : accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1460 : {
1461 : struct accel_buffer *accel_buf;
1462 :
1463 3 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1464 :
1465 3 : assert(accel_buf->seq != NULL);
1466 3 : assert(accel_buf->buf == NULL);
1467 3 : accel_buf->buf = buf;
1468 :
1469 3 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1470 3 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1471 3 : accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1472 3 : accel_process_sequence(accel_buf->seq);
1473 3 : }
1474 :
1475 : static bool
1476 29 : accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1477 : spdk_iobuf_get_cb cb_fn)
1478 : {
1479 29 : struct accel_io_channel *ch = seq->ch;
1480 :
1481 29 : assert(buf->seq == NULL);
1482 :
1483 29 : buf->seq = seq;
1484 :
1485 : /* Buffer might be already allocated by memory domain translation. */
1486 29 : if (buf->buf) {
1487 0 : return true;
1488 : }
1489 :
1490 29 : buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1491 29 : if (spdk_unlikely(buf->buf == NULL)) {
1492 5 : accel_update_stats(ch, retry.iobuf, 1);
1493 5 : return false;
1494 : }
1495 :
1496 24 : return true;
1497 : }
1498 :
1499 : static bool
1500 89 : accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1501 : {
1502 : /* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1503 : * NULL */
1504 89 : if (task->src_domain == g_accel_domain) {
1505 0 : if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1506 : accel_iobuf_get_virtbuf_cb)) {
1507 0 : return false;
1508 : }
1509 :
1510 0 : accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1511 : }
1512 :
1513 89 : if (task->dst_domain == g_accel_domain) {
1514 10 : if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1515 : accel_iobuf_get_virtbuf_cb)) {
1516 3 : return false;
1517 : }
1518 :
1519 7 : accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1520 : }
1521 :
1522 86 : return true;
1523 : }
1524 :
1525 : static void
1526 0 : accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1527 : {
1528 : struct accel_buffer *accel_buf;
1529 :
1530 0 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1531 :
1532 0 : assert(accel_buf->seq != NULL);
1533 0 : assert(accel_buf->buf == NULL);
1534 0 : accel_buf->buf = buf;
1535 :
1536 0 : accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1537 0 : accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1538 0 : }
1539 :
1540 : bool
1541 0 : spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1542 : struct spdk_memory_domain *domain, void *domain_ctx,
1543 : spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1544 : {
1545 0 : struct accel_buffer *accel_buf = domain_ctx;
1546 :
1547 0 : assert(domain == g_accel_domain);
1548 0 : accel_buf->cb_fn = cb_fn;
1549 0 : accel_buf->cb_ctx = cb_ctx;
1550 :
1551 0 : if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1552 0 : return false;
1553 : }
1554 :
1555 0 : accel_sequence_set_virtbuf(seq, accel_buf);
1556 :
1557 0 : return true;
1558 : }
1559 :
/* Return the first task in the sequence, or NULL when it is empty. */
struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}
1565 :
/* Return the task following `task` in its sequence, or NULL at the end. */
struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
1571 :
1572 : static inline void
1573 19 : accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1574 : uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1575 : struct accel_buffer *buf)
1576 : {
1577 19 : bounce->orig_iovs = *iovs;
1578 19 : bounce->orig_iovcnt = *iovcnt;
1579 19 : bounce->orig_domain = *domain;
1580 19 : bounce->orig_domain_ctx = *domain_ctx;
1581 19 : bounce->iov.iov_base = buf->buf;
1582 19 : bounce->iov.iov_len = buf->len;
1583 :
1584 19 : *iovs = &bounce->iov;
1585 19 : *iovcnt = 1;
1586 19 : *domain = NULL;
1587 19 : }
1588 :
1589 : static void
1590 1 : accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1591 : {
1592 : struct spdk_accel_task *task;
1593 : struct accel_buffer *accel_buf;
1594 :
1595 1 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1596 1 : assert(accel_buf->buf == NULL);
1597 1 : accel_buf->buf = buf;
1598 :
1599 1 : task = TAILQ_FIRST(&accel_buf->seq->tasks);
1600 1 : assert(task != NULL);
1601 :
1602 1 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1603 1 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1604 1 : assert(task->aux);
1605 1 : assert(task->has_aux);
1606 1 : accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1607 : &task->src_domain_ctx, accel_buf);
1608 1 : accel_process_sequence(accel_buf->seq);
1609 1 : }
1610 :
1611 : static void
1612 1 : accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1613 : {
1614 : struct spdk_accel_task *task;
1615 : struct accel_buffer *accel_buf;
1616 :
1617 1 : accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1618 1 : assert(accel_buf->buf == NULL);
1619 1 : accel_buf->buf = buf;
1620 :
1621 1 : task = TAILQ_FIRST(&accel_buf->seq->tasks);
1622 1 : assert(task != NULL);
1623 :
1624 1 : assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1625 1 : accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1626 1 : assert(task->aux);
1627 1 : assert(task->has_aux);
1628 1 : accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1629 : &task->dst_domain_ctx, accel_buf);
1630 1 : accel_process_sequence(accel_buf->seq);
1631 1 : }
1632 :
/* When the module executing a task does not support memory domains, allocate
 * local bounce buffers for any src/dst that still lives in a foreign domain.
 * Returns 0 on success, -EAGAIN when an iobuf allocation was deferred (the
 * sequence resumes from the bounce callback) and -ENOMEM on descriptor
 * exhaustion. */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		/* The bounce-buffer bookkeeping lives in the task's aux data. */
		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it is released when the
		 * sequence completes. */
		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1699 :
1700 : static void
1701 8 : accel_task_pull_data_cb(void *ctx, int status)
1702 : {
1703 8 : struct spdk_accel_sequence *seq = ctx;
1704 :
1705 8 : assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1706 8 : if (spdk_likely(status == 0)) {
1707 7 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1708 : } else {
1709 1 : accel_sequence_set_fail(seq, status);
1710 : }
1711 :
1712 8 : accel_process_sequence(seq);
1713 8 : }
1714 :
/* Pull the task's source data from its original (foreign) memory domain into
 * the local bounce buffer before the module executes the task.  On submit
 * failure the sequence is failed immediately; otherwise completion is
 * signalled asynchronously via accel_task_pull_data_cb. */
static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	/* Only reached after a src bounce buffer was installed, which requires
	 * aux data and a module without memory-domain support. */
	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.s.orig_iovs != NULL);
	assert(task->aux->bounce.s.orig_domain != NULL);
	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
					  task->aux->bounce.s.orig_domain_ctx,
					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
1738 :
1739 : static void
1740 7 : accel_task_push_data_cb(void *ctx, int status)
1741 : {
1742 7 : struct spdk_accel_sequence *seq = ctx;
1743 :
1744 7 : assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1745 7 : if (spdk_likely(status == 0)) {
1746 6 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1747 : } else {
1748 1 : accel_sequence_set_fail(seq, status);
1749 : }
1750 :
1751 7 : accel_process_sequence(seq);
1752 7 : }
1753 :
1754 : static void
1755 8 : accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1756 : {
1757 : int rc;
1758 :
1759 8 : assert(task->has_aux);
1760 8 : assert(task->aux);
1761 8 : assert(task->aux->bounce.d.orig_iovs != NULL);
1762 8 : assert(task->aux->bounce.d.orig_domain != NULL);
1763 8 : assert(task->aux->bounce.d.orig_domain != g_accel_domain);
1764 8 : assert(!g_modules_opc[task->op_code].supports_memory_domains);
1765 :
1766 8 : rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
1767 8 : task->aux->bounce.d.orig_domain_ctx,
1768 8 : task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
1769 : task->d.iovs, task->d.iovcnt,
1770 : accel_task_push_data_cb, seq);
1771 8 : if (spdk_unlikely(rc != 0)) {
1772 1 : SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1773 : spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1774 1 : accel_sequence_set_fail(seq, rc);
1775 : }
1776 8 : }
1777 :
/* Advance a sequence's state machine, executing as many of its tasks as possible without
 * blocking.  Loops until the state stops changing, i.e. until the sequence completes,
 * fails, or has to wait for an asynchronous event (buffer allocation, task completion,
 * data pull/push, or the platform driver).  Re-entrant calls (e.g. from a completion
 * callback invoked synchronously during submission) return immediately; the outer
 * invocation's loop picks up the new state. */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				/* A platform driver is registered, so hand the tasks to it */
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* A source bounce buffer was set up, so data must be pulled in from the
			 * original memory domain before the task can execute */
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
			/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* If the task wrote into a destination bounce buffer, push the result back
			 * to the original memory domain before moving to the next task */
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			/* Waiting for an asynchronous event; the corresponding completion callback
			 * will move the state machine forward */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1920 :
/* Completion callback for tasks belonging to a sequence.  Behavior depends on how the
 * task was executed: through the regular module path (AWAIT_TASK), the state machine is
 * resumed immediately; through a platform driver (DRIVER_AWAIT_TASKS), only the status is
 * recorded and the task retired — the driver resumes the sequence later via
 * spdk_accel_sequence_continue(). */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
1957 :
1958 : void
1959 12 : spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1960 : {
1961 12 : assert(g_accel_driver != NULL);
1962 12 : assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1963 :
1964 12 : if (spdk_likely(seq->status == 0)) {
1965 11 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1966 : } else {
1967 1 : accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1968 : }
1969 :
1970 12 : accel_process_sequence(seq);
1971 12 : }
1972 :
/* Check whether two iovec arrays describe the same buffers.  For now this is a shallow
 * test: the arrays must have the same count and be bitwise identical. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	return iovacnt == iovbcnt &&
	       memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}
1983 :
/* Try to redirect task's output so that the copy described by next becomes unnecessary:
 * next is expected to be a COPY whose source matches task's output buffer.  Returns true
 * if the buffers were successfully swapped (after which next can be dropped), false if
 * the tasks are incompatible. */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		/* Swapping is only possible when the buffers live in the same memory domain and
		 * task's output is exactly next's input */
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* Write directly into the copy's destination instead */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		/* The previous task now writes into next's destination, so crc32 must read
		 * from there as well */
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
2034 :
/* Try to merge two consecutive tasks, task and *next_task, eliminating a COPY operation
 * whose buffers line up with the neighboring task's.  The eliminated task is completed
 * immediately; when it's *next_task that gets removed, the pointer is advanced so that
 * the caller's iteration over the task list stays valid. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* Let next read directly from the copy's source and drop the copy itself */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the caller's iteration over the task list works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
2090 :
2091 : void
2092 52 : spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2093 : spdk_accel_completion_cb cb_fn, void *cb_arg)
2094 : {
2095 52 : struct spdk_accel_task *task, *next;
2096 :
2097 : /* Try to remove any copy operations if possible */
2098 124 : TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2099 109 : if (next == NULL) {
2100 37 : break;
2101 : }
2102 72 : accel_sequence_merge_tasks(seq, task, &next);
2103 : }
2104 :
2105 52 : seq->cb_fn = cb_fn;
2106 52 : seq->cb_arg = cb_arg;
2107 :
2108 52 : accel_process_sequence(seq);
2109 52 : }
2110 :
2111 : void
2112 0 : spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2113 : {
2114 0 : struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2115 : struct spdk_accel_task *task;
2116 :
2117 0 : TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2118 :
2119 0 : while (!TAILQ_EMPTY(&tasks)) {
2120 0 : task = TAILQ_FIRST(&tasks);
2121 0 : TAILQ_REMOVE(&tasks, task, seq_link);
2122 0 : TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2123 : }
2124 0 : }
2125 :
2126 : void
2127 3 : spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2128 : {
2129 3 : if (seq == NULL) {
2130 1 : return;
2131 : }
2132 :
2133 2 : accel_sequence_complete_tasks(seq);
2134 2 : accel_sequence_put(seq);
2135 : }
2136 :
2137 : struct spdk_memory_domain *
2138 0 : spdk_accel_get_memory_domain(void)
2139 : {
2140 0 : return g_accel_domain;
2141 : }
2142 :
2143 : static struct spdk_accel_module_if *
2144 7 : _module_find_by_name(const char *name)
2145 : {
2146 7 : struct spdk_accel_module_if *accel_module = NULL;
2147 :
2148 16 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2149 10 : if (strcmp(name, accel_module->name) == 0) {
2150 1 : break;
2151 : }
2152 : }
2153 :
2154 7 : return accel_module;
2155 : }
2156 :
2157 : static inline struct spdk_accel_crypto_key *
2158 0 : _accel_crypto_key_get(const char *name)
2159 : {
2160 : struct spdk_accel_crypto_key *key;
2161 :
2162 0 : assert(spdk_spin_held(&g_keyring_spin));
2163 :
2164 0 : TAILQ_FOREACH(key, &g_keyring, link) {
2165 0 : if (strcmp(name, key->param.key_name) == 0) {
2166 0 : return key;
2167 : }
2168 : }
2169 :
2170 0 : return NULL;
2171 : }
2172 :
2173 : static void
2174 0 : accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2175 : {
2176 0 : if (key->param.hex_key) {
2177 0 : spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2178 0 : free(key->param.hex_key);
2179 : }
2180 0 : if (key->param.hex_key2) {
2181 0 : spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2182 0 : free(key->param.hex_key2);
2183 : }
2184 0 : free(key->param.tweak_mode);
2185 0 : free(key->param.key_name);
2186 0 : free(key->param.cipher);
2187 0 : if (key->key) {
2188 0 : spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2189 0 : free(key->key);
2190 : }
2191 0 : if (key->key2) {
2192 0 : spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2193 0 : free(key->key2);
2194 : }
2195 0 : free(key);
2196 0 : }
2197 :
2198 : static void
2199 0 : accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2200 : {
2201 0 : assert(key->module_if);
2202 0 : assert(key->module_if->crypto_key_deinit);
2203 :
2204 0 : key->module_if->crypto_key_deinit(key);
2205 0 : accel_crypto_key_free_mem(key);
2206 0 : }
2207 :
2208 : /*
2209 : * This function mitigates a timing side channel which could be caused by using strcmp()
2210 : * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2211 : * the article [1] for more details
2212 : * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2213 : */
2214 : static bool
2215 0 : accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2216 : {
2217 : size_t i;
2218 0 : volatile size_t x = k1_len ^ k2_len;
2219 :
2220 0 : for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2221 0 : x |= k1[i] ^ k2[i];
2222 : }
2223 :
2224 0 : return x == 0;
2225 : }
2226 :
/* Human-readable tweak-mode names, indexed by enum spdk_accel_crypto_tweak_mode.  Used
 * when parsing key-create parameters and in log messages. */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

/* Human-readable cipher names, indexed by enum spdk_accel_cipher. */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2238 :
/* Create a crypto key from user-supplied parameters and register it in the global
 * keyring.  The key is validated and initialized by the module currently assigned to the
 * ENCRYPT opcode.  Returns 0 on success or a negative errno; -EEXIST if a key with the
 * same name is already registered.  On failure, all partially-allocated key memory is
 * scrubbed and freed. */
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	/* NOTE(review): strncmp() with strlen(g_ciphers[i]) accepts any string that merely
	 * starts with a known cipher name (e.g. "AES_XTS_FOO") — verify this leniency is
	 * intentional */
	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	/* Keys arrive as hex strings, so the binary key is half the hex length */
	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	/* key2 is optional here; whether it's required depends on the cipher (checked below) */
	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	/* A non-default tweak mode requires explicit support from the module */
	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	/* AES-XTS needs two keys of equal size that must not be identical */
	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size,
				    key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	/* AES-CBC uses a single key */
	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	/* Check for name collisions and insert into the keyring under the lock */
	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
2455 :
2456 : int
2457 0 : spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2458 : {
2459 0 : if (!key || !key->module_if) {
2460 0 : return -EINVAL;
2461 : }
2462 :
2463 0 : spdk_spin_lock(&g_keyring_spin);
2464 0 : if (!_accel_crypto_key_get(key->param.key_name)) {
2465 0 : spdk_spin_unlock(&g_keyring_spin);
2466 0 : return -ENOENT;
2467 : }
2468 0 : TAILQ_REMOVE(&g_keyring, key, link);
2469 0 : spdk_spin_unlock(&g_keyring_spin);
2470 :
2471 0 : accel_crypto_key_destroy_unsafe(key);
2472 :
2473 0 : return 0;
2474 : }
2475 :
2476 : struct spdk_accel_crypto_key *
2477 0 : spdk_accel_crypto_key_get(const char *name)
2478 : {
2479 : struct spdk_accel_crypto_key *key;
2480 :
2481 0 : spdk_spin_lock(&g_keyring_spin);
2482 0 : key = _accel_crypto_key_get(name);
2483 0 : spdk_spin_unlock(&g_keyring_spin);
2484 :
2485 0 : return key;
2486 : }
2487 :
2488 : /* Helper function when accel modules register with the framework. */
2489 : void
2490 5 : spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2491 : {
2492 : struct spdk_accel_module_if *tmp;
2493 :
2494 5 : if (_module_find_by_name(accel_module->name)) {
2495 0 : SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2496 0 : assert(false);
2497 : return;
2498 : }
2499 :
2500 8 : TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2501 5 : if (accel_module->priority < tmp->priority) {
2502 2 : break;
2503 : }
2504 : }
2505 :
2506 5 : if (tmp != NULL) {
2507 2 : TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2508 : } else {
2509 3 : TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2510 : }
2511 : }
2512 :
/* Framework level channel create callback.  Builds the per-channel free pools of tasks,
 * sequences, aux data and buffer descriptors, acquires an IO channel from the module
 * assigned to each opcode (plus the platform driver, if one is registered), and sets up
 * the iobuf channel backing accel buffers.  Returns 0 on success, -ENOMEM on any
 * allocation or channel-acquisition failure (with everything acquired so far released).
 */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	/* Tasks are cache-line aligned and sized for the largest registered module's context */
	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						 g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	/* Carve the flat allocations up into per-object free lists */
	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	/* i holds the number of module channels acquired so far (0 on the early
	 * allocation-failure paths), so only channels [0, i) need to be released */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
2615 :
2616 : static void
2617 11 : accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2618 : {
2619 : int i;
2620 :
2621 11 : total->sequence_executed += stats->sequence_executed;
2622 11 : total->sequence_failed += stats->sequence_failed;
2623 11 : total->retry.task += stats->retry.task;
2624 11 : total->retry.sequence += stats->retry.sequence;
2625 11 : total->retry.iobuf += stats->retry.iobuf;
2626 11 : total->retry.bufdesc += stats->retry.bufdesc;
2627 176 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2628 165 : total->operations[i].executed += stats->operations[i].executed;
2629 165 : total->operations[i].failed += stats->operations[i].failed;
2630 165 : total->operations[i].num_bytes += stats->operations[i].num_bytes;
2631 : }
2632 11 : }
2633 :
2634 : /* Framework level channel destroy callback. */
2635 : static void
2636 11 : accel_destroy_channel(void *io_device, void *ctx_buf)
2637 : {
2638 11 : struct accel_io_channel *accel_ch = ctx_buf;
2639 : int i;
2640 :
2641 11 : spdk_iobuf_channel_fini(&accel_ch->iobuf);
2642 :
2643 11 : if (accel_ch->driver_channel != NULL) {
2644 0 : spdk_put_io_channel(accel_ch->driver_channel);
2645 : }
2646 :
2647 176 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2648 165 : assert(accel_ch->module_ch[i] != NULL);
2649 165 : spdk_put_io_channel(accel_ch->module_ch[i]);
2650 165 : accel_ch->module_ch[i] = NULL;
2651 : }
2652 :
2653 : /* Update global stats to make sure channel's stats aren't lost after a channel is gone */
2654 11 : spdk_spin_lock(&g_stats_lock);
2655 11 : accel_add_stats(&g_stats, &accel_ch->stats);
2656 11 : spdk_spin_unlock(&g_stats_lock);
2657 :
2658 11 : free(accel_ch->task_pool_base);
2659 11 : free(accel_ch->task_aux_data_base);
2660 11 : free(accel_ch->seq_pool_base);
2661 11 : free(accel_ch->buf_pool_base);
2662 11 : }
2663 :
2664 : struct spdk_io_channel *
2665 11 : spdk_accel_get_io_channel(void)
2666 : {
2667 11 : return spdk_get_io_channel(&spdk_accel_module_list);
2668 : }
2669 :
2670 : static int
2671 2 : accel_module_initialize(void)
2672 : {
2673 : struct spdk_accel_module_if *accel_module, *tmp_module;
2674 2 : int rc = 0, module_rc;
2675 :
2676 7 : TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2677 5 : module_rc = accel_module->module_init();
2678 5 : if (module_rc) {
2679 0 : TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2680 0 : if (module_rc == -ENODEV) {
2681 0 : SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
2682 0 : } else if (!rc) {
2683 0 : SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2684 0 : rc = module_rc;
2685 : }
2686 0 : continue;
2687 : }
2688 :
2689 5 : SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2690 : }
2691 :
2692 2 : return rc;
2693 : }
2694 :
2695 : static void
2696 30 : accel_module_init_opcode(enum spdk_accel_opcode opcode)
2697 : {
2698 30 : struct accel_module *module = &g_modules_opc[opcode];
2699 30 : struct spdk_accel_module_if *module_if = module->module;
2700 :
2701 30 : if (module_if->get_memory_domains != NULL) {
2702 0 : module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2703 : }
2704 30 : }
2705 :
2706 : static int
2707 0 : accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
2708 : struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
2709 : void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
2710 : {
2711 0 : struct accel_buffer *buf = src_domain_ctx;
2712 :
2713 0 : SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);
2714 :
2715 0 : assert(g_accel_domain == src_domain);
2716 0 : assert(spdk_memory_domain_get_system_domain() == dst_domain);
2717 0 : assert(buf->buf == NULL);
2718 0 : assert(addr == ACCEL_BUFFER_BASE);
2719 0 : assert(len == buf->len);
2720 :
2721 0 : buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
2722 0 : if (spdk_unlikely(buf->buf == NULL)) {
2723 0 : return -ENOMEM;
2724 : }
2725 :
2726 0 : result->iov_count = 1;
2727 0 : result->iov.iov_base = buf->buf;
2728 0 : result->iov.iov_len = buf->len;
2729 0 : SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
2730 0 : return 0;
2731 : }
2732 :
2733 : static void
2734 0 : accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
2735 : struct iovec *iov, uint32_t iovcnt)
2736 : {
2737 0 : struct accel_buffer *buf = domain_ctx;
2738 :
2739 0 : SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);
2740 :
2741 0 : assert(g_accel_domain == domain);
2742 0 : assert(iovcnt == 1);
2743 0 : assert(buf->buf != NULL);
2744 0 : assert(iov[0].iov_base == buf->buf);
2745 0 : assert(iov[0].iov_len == buf->len);
2746 :
2747 0 : spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
2748 0 : buf->buf = NULL;
2749 0 : }
2750 :
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	/* Create the accel memory domain used for virtual accel buffers and wire
	 * up its translate/invalidate callbacks. */
	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	/* Initialize the optional framework-level driver, if one was selected
	 * via spdk_accel_set_driver(). */
	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the end
	 * of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules; later (higher-priority) modules overwrite earlier assignments.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		/* Track the largest per-task context size any module requires. */
		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	/* Encrypt and decrypt must resolve to the same module — presumably because
	 * crypto keys are tied to a single module; confirm against the key APIs. */
	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	/* Cache per-opcode capabilities (e.g. memory-domain support). */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
2847 :
2848 : static void
2849 2 : accel_module_finish_cb(void)
2850 : {
2851 2 : spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2852 :
2853 2 : cb_fn(g_fini_cb_arg);
2854 2 : g_fini_cb_fn = NULL;
2855 2 : g_fini_cb_arg = NULL;
2856 2 : }
2857 :
/* Emit one "accel_assign_opc" RPC entry for a manually overridden opcode,
 * so the current assignment can be replayed from a saved config. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
2870 :
2871 : static void
2872 0 : __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2873 : {
2874 0 : spdk_json_write_named_string(w, "name", key->param.key_name);
2875 0 : spdk_json_write_named_string(w, "cipher", key->param.cipher);
2876 0 : spdk_json_write_named_string(w, "key", key->param.hex_key);
2877 0 : if (key->param.hex_key2) {
2878 0 : spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2879 : }
2880 :
2881 0 : if (key->param.tweak_mode) {
2882 0 : spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
2883 : }
2884 0 : }
2885 :
/* Dump one crypto key's parameters wrapped in their own JSON object. */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2893 :
/* Write one crypto key as a replayable "accel_crypto_key_create" RPC entry. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");

	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
2905 :
2906 : static void
2907 0 : accel_write_options(struct spdk_json_write_ctx *w)
2908 : {
2909 0 : spdk_json_write_object_begin(w);
2910 0 : spdk_json_write_named_string(w, "method", "accel_set_options");
2911 0 : spdk_json_write_named_object_begin(w, "params");
2912 0 : spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2913 0 : spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2914 0 : spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2915 0 : spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2916 0 : spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2917 0 : spdk_json_write_object_end(w);
2918 0 : spdk_json_write_object_end(w);
2919 0 : }
2920 :
2921 : static void
2922 0 : _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2923 : {
2924 : struct spdk_accel_crypto_key *key;
2925 :
2926 0 : spdk_spin_lock(&g_keyring_spin);
2927 0 : TAILQ_FOREACH(key, &g_keyring, link) {
2928 0 : if (full_dump) {
2929 0 : _accel_crypto_key_write_config_json(w, key);
2930 : } else {
2931 0 : _accel_crypto_key_dump_param(w, key);
2932 : }
2933 : }
2934 0 : spdk_spin_unlock(&g_keyring_spin);
2935 0 : }
2936 :
/* Dump all crypto keys in the short (parameters-only) form. */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
2942 :
2943 : void
2944 0 : spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2945 : {
2946 : struct spdk_accel_module_if *accel_module;
2947 : int i;
2948 :
2949 0 : spdk_json_write_array_begin(w);
2950 0 : accel_write_options(w);
2951 :
2952 0 : TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2953 0 : if (accel_module->write_config_json) {
2954 0 : accel_module->write_config_json(w);
2955 : }
2956 : }
2957 0 : for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2958 0 : if (g_modules_opc_override[i]) {
2959 0 : accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2960 : }
2961 : }
2962 :
2963 0 : _accel_crypto_keys_write_config_json(w, true);
2964 :
2965 0 : spdk_json_write_array_end(w);
2966 0 : }
2967 :
2968 : void
2969 7 : spdk_accel_module_finish(void)
2970 : {
2971 7 : if (!g_accel_module) {
2972 2 : g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2973 : } else {
2974 5 : g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2975 : }
2976 :
2977 7 : if (!g_accel_module) {
2978 2 : if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
2979 0 : g_accel_driver->fini();
2980 : }
2981 :
2982 2 : spdk_spin_destroy(&g_keyring_spin);
2983 2 : spdk_spin_destroy(&g_stats_lock);
2984 2 : if (g_accel_domain) {
2985 2 : spdk_memory_domain_destroy(g_accel_domain);
2986 2 : g_accel_domain = NULL;
2987 : }
2988 2 : accel_module_finish_cb();
2989 2 : return;
2990 : }
2991 :
2992 5 : if (g_accel_module->module_fini) {
2993 1 : spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2994 : } else {
2995 4 : spdk_accel_module_finish();
2996 : }
2997 : }
2998 :
2999 : static void
3000 2 : accel_io_device_unregister_cb(void *io_device)
3001 : {
3002 : struct spdk_accel_crypto_key *key, *key_tmp;
3003 : enum spdk_accel_opcode op;
3004 :
3005 2 : spdk_spin_lock(&g_keyring_spin);
3006 2 : TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3007 0 : accel_crypto_key_destroy_unsafe(key);
3008 : }
3009 2 : spdk_spin_unlock(&g_keyring_spin);
3010 :
3011 32 : for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3012 30 : if (g_modules_opc_override[op] != NULL) {
3013 0 : free(g_modules_opc_override[op]);
3014 0 : g_modules_opc_override[op] = NULL;
3015 : }
3016 30 : g_modules_opc[op].module = NULL;
3017 : }
3018 :
3019 2 : spdk_accel_module_finish();
3020 2 : }
3021 :
3022 : void
3023 2 : spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
3024 : {
3025 2 : assert(cb_fn != NULL);
3026 :
3027 2 : g_fini_cb_fn = cb_fn;
3028 2 : g_fini_cb_arg = cb_arg;
3029 :
3030 2 : spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3031 2 : }
3032 :
3033 : static struct spdk_accel_driver *
3034 2 : accel_find_driver(const char *name)
3035 : {
3036 : struct spdk_accel_driver *driver;
3037 :
3038 2 : TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3039 1 : if (strcmp(driver->name, name) == 0) {
3040 1 : return driver;
3041 : }
3042 : }
3043 :
3044 1 : return NULL;
3045 : }
3046 :
3047 : int
3048 1 : spdk_accel_set_driver(const char *name)
3049 : {
3050 : struct spdk_accel_driver *driver;
3051 :
3052 1 : driver = accel_find_driver(name);
3053 1 : if (driver == NULL) {
3054 0 : SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3055 0 : return -ENODEV;
3056 : }
3057 :
3058 1 : g_accel_driver = driver;
3059 :
3060 1 : return 0;
3061 : }
3062 :
3063 : const char *
3064 0 : spdk_accel_get_driver_name(void)
3065 : {
3066 0 : if (!g_accel_driver) {
3067 0 : return NULL;
3068 : }
3069 :
3070 0 : return g_accel_driver->name;
3071 : }
3072 :
3073 : void
3074 1 : spdk_accel_driver_register(struct spdk_accel_driver *driver)
3075 : {
3076 1 : if (accel_find_driver(driver->name)) {
3077 0 : SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3078 0 : assert(0);
3079 : return;
3080 : }
3081 :
3082 1 : TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3083 : }
3084 :
3085 : int
3086 0 : spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3087 : {
3088 0 : if (!opts) {
3089 0 : SPDK_ERRLOG("opts cannot be NULL\n");
3090 0 : return -1;
3091 : }
3092 :
3093 0 : if (!opts->opts_size) {
3094 0 : SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
3095 0 : return -1;
3096 : }
3097 :
3098 : #define SET_FIELD(field) \
3099 : if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3100 : g_opts.field = opts->field; \
3101 : } \
3102 :
3103 0 : SET_FIELD(small_cache_size);
3104 0 : SET_FIELD(large_cache_size);
3105 0 : SET_FIELD(task_count);
3106 0 : SET_FIELD(sequence_count);
3107 0 : SET_FIELD(buf_count);
3108 :
3109 0 : g_opts.opts_size = opts->opts_size;
3110 :
3111 : #undef SET_FIELD
3112 :
3113 0 : return 0;
3114 : }
3115 :
3116 : void
3117 0 : spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3118 : {
3119 0 : if (!opts) {
3120 0 : SPDK_ERRLOG("opts should not be NULL\n");
3121 0 : return;
3122 : }
3123 :
3124 0 : if (!opts_size) {
3125 0 : SPDK_ERRLOG("opts_size should not be zero value\n");
3126 0 : return;
3127 : }
3128 :
3129 0 : opts->opts_size = opts_size;
3130 :
3131 : #define SET_FIELD(field) \
3132 : if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3133 : opts->field = g_opts.field; \
3134 : } \
3135 :
3136 0 : SET_FIELD(small_cache_size);
3137 0 : SET_FIELD(large_cache_size);
3138 0 : SET_FIELD(task_count);
3139 0 : SET_FIELD(sequence_count);
3140 0 : SET_FIELD(buf_count);
3141 :
3142 : #undef SET_FIELD
3143 :
3144 : /* Do not remove this statement, you should always update this statement when you adding a new field,
3145 : * and do not forget to add the SET_FIELD statement for your added field. */
3146 : SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
3147 : }
3148 :
/* Context for an asynchronous, cross-channel stats collection started by
 * accel_get_stats(); freed in accel_get_channel_stats_done(). */
struct accel_get_stats_ctx {
	struct accel_stats stats;	/* accumulated totals: global stats plus each channel's */
	accel_get_stats_cb cb_fn;	/* user callback invoked when the channel iteration completes */
	void *cb_arg;			/* opaque argument passed through to cb_fn */
};
3154 :
3155 : static void
3156 0 : accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3157 : {
3158 0 : struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3159 :
3160 0 : ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3161 0 : free(ctx);
3162 0 : }
3163 :
3164 : static void
3165 0 : accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3166 : {
3167 0 : struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3168 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3169 0 : struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3170 :
3171 0 : accel_add_stats(&ctx->stats, &accel_ch->stats);
3172 0 : spdk_for_each_channel_continue(iter, 0);
3173 0 : }
3174 :
3175 : int
3176 0 : accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3177 : {
3178 : struct accel_get_stats_ctx *ctx;
3179 :
3180 0 : ctx = calloc(1, sizeof(*ctx));
3181 0 : if (ctx == NULL) {
3182 0 : return -ENOMEM;
3183 : }
3184 :
3185 0 : spdk_spin_lock(&g_stats_lock);
3186 0 : accel_add_stats(&ctx->stats, &g_stats);
3187 0 : spdk_spin_unlock(&g_stats_lock);
3188 :
3189 0 : ctx->cb_fn = cb_fn;
3190 0 : ctx->cb_arg = cb_arg;
3191 :
3192 0 : spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3193 : accel_get_channel_stats_done);
3194 :
3195 0 : return 0;
3196 : }
3197 :
3198 : void
3199 0 : spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
3200 : struct spdk_accel_opcode_stats *stats, size_t size)
3201 : {
3202 0 : struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3203 :
3204 : #define FIELD_OK(field) \
3205 : offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
3206 :
3207 : #define SET_FIELD(field, value) \
3208 : if (FIELD_OK(field)) { \
3209 : stats->field = value; \
3210 : }
3211 :
3212 0 : SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
3213 0 : SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
3214 0 : SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
3215 :
3216 : #undef FIELD_OK
3217 : #undef SET_FIELD
3218 0 : }
3219 :
3220 : uint8_t
3221 0 : spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3222 : const struct spdk_accel_operation_exec_ctx *ctx)
3223 : {
3224 0 : struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3225 0 : struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3226 :
3227 0 : if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3228 0 : g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3229 : }
3230 :
3231 0 : if (module->get_operation_info != NULL) {
3232 0 : module->get_operation_info(opcode, ctx, &modinfo);
3233 : }
3234 :
3235 : /* If a driver is set, it'll execute most of the operations, while the rest will usually
3236 : * fall back to accel_sw, which doesn't have any alignment requiremenets. However, to be
3237 : * extra safe, return the max(driver, module) if a driver delegates some operations to a
3238 : * hardware module. */
3239 0 : return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3240 : }
3241 :
3242 : struct spdk_accel_module_if *
3243 0 : spdk_accel_get_module(const char *name)
3244 : {
3245 : struct spdk_accel_module_if *module;
3246 :
3247 0 : TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3248 0 : if (strcmp(module->name, name) == 0) {
3249 0 : return module;
3250 : }
3251 : }
3252 :
3253 0 : return NULL;
3254 : }
3255 :
3256 : int
3257 0 : spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3258 : struct spdk_memory_domain **domains,
3259 : int array_size)
3260 : {
3261 0 : assert(opcode < SPDK_ACCEL_OPC_LAST);
3262 :
3263 0 : if (g_modules_opc[opcode].module->get_memory_domains) {
3264 0 : return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3265 : }
3266 :
3267 0 : return 0;
3268 : }
3269 :
3270 1 : SPDK_LOG_REGISTER_COMPONENT(accel)
|