Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation.
3 : * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4 : * All rights reserved.
5 : */
6 :
7 : #include "accel_dpdk_cryptodev.h"
8 :
9 : #include "spdk/accel.h"
10 : #include "spdk/accel_module.h"
11 : #include "spdk/env.h"
12 : #include "spdk/likely.h"
13 : #include "spdk/thread.h"
14 : #include "spdk/util.h"
15 : #include "spdk/log.h"
16 : #include "spdk/json.h"
17 : #include "spdk_internal/sgl.h"
18 :
19 : #include <rte_bus_vdev.h>
20 : #include <rte_crypto.h>
21 : #include <rte_cryptodev.h>
22 : #include <rte_mbuf_dyn.h>
23 : #include <rte_version.h>
24 :
25 : /* The VF spread is the number of queue pairs between virtual functions; we use this to
26 : * load balance the QAT device.
27 : */
28 : #define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD 32
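/* Illustrative note (not in the original source): with two QAT VFs exposing 32
 * queue pairs each (g_qat_total_qp == 64), the first channel takes qp index 0
 * and the cursor advances to 32; the next channel takes 32 and the cursor wraps
 * to 0, which is in use, so the +1 fallback moves it to 1, yielding the
 * sequence 0, 32, 1, 33, ... across VFs. See
 * accel_dpdk_cryptodev_assign_device_qps() below. */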
29 :
30 : /* This controls how many ops will be dequeued from the crypto driver in one run
31 : * of the poller. It is mainly a performance knob as it effectively determines how
32 : * much work the poller has to do. However, even that can vary between crypto drivers:
33 : * the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver, for example, does all the crypto work on dequeue, whereas the
34 : * QAT driver just dequeues what has already been completed.
35 : */
36 : #define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE 64
37 :
38 : #define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128)
39 :
40 : /* The number of MBUFS we need must be a power of two, and to support other small IOs
41 : * in addition to the limits mentioned above, we go to the next power of two. It is a
42 : * big number because one mempool serves both source and destination mbufs. It may
43 : * need to be bigger to support multiple crypto drivers at once.
44 : */
45 : #define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS 32768
46 : #define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE 256
47 : #define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES 128
48 : #define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS (2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES)
49 : #define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE 0
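/* Note: each crypto key allocates two sessions per device (one for encryption,
 * one for decryption, see accel_dpdk_cryptodev_key_handle_configure() below),
 * hence NUM_SESSIONS is twice MAX_CRYPTO_VOLUMES. */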
50 :
51 : /* This is the max number of IOs we can supply to any crypto device QP at one time.
52 : * It can vary between drivers.
53 : */
54 : #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS 2048
55 :
56 : /* At this moment, DPDK descriptor allocation for mlx5 has some issues. We use 512
57 : * as a compromise between performance and the time spent on initialization. */
58 : #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5 512
59 :
60 : #define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP 64
61 :
62 : /* Common for supported devices. */
63 : #define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS 2
64 : #define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \
65 : sizeof(struct rte_crypto_sym_op) + \
66 : (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
67 : sizeof(struct rte_crypto_sym_xform)))
68 : #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH 16
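/* Illustrative layout of one element of g_crypto_op_mp, implied by the offsets
 * above:
 *
 *   [rte_crypto_op][rte_crypto_sym_op][2 * rte_crypto_sym_xform][16-byte IV]
 *   ^ element start                                             ^ IV_OFFSET
 *
 * accel_dpdk_cryptodev_op_set_iv() writes the IV at IV_OFFSET via
 * rte_crypto_op_ctod_offset(). */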
69 :
70 : /* Driver names */
71 : #define ACCEL_DPDK_CRYPTODEV_AESNI_MB "crypto_aesni_mb"
72 : #define ACCEL_DPDK_CRYPTODEV_QAT "crypto_qat"
73 : #define ACCEL_DPDK_CRYPTODEV_QAT_ASYM "crypto_qat_asym"
74 : #define ACCEL_DPDK_CRYPTODEV_MLX5 "mlx5_pci"
75 :
76 : /* Supported ciphers */
77 : #define ACCEL_DPDK_CRYPTODEV_AES_CBC "AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */
78 : #define ACCEL_DPDK_CRYPTODEV_AES_XTS "AES_XTS" /* QAT and MLX5 */
79 :
80 : /* Specific to AES_CBC. */
81 : #define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH 16
82 :
83 : /* Limit on the max memory length attached to an mbuf - rte_pktmbuf_attach_extbuf() takes a uint16_t `buf_len`
84 : * parameter, so we use the closest aligned value, 32768, for better performance */
85 : #define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN 32768
86 :
87 : /* Used to store IO context in mbuf */
88 : static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
89 : .name = "context_accel_dpdk_cryptodev",
90 : .size = sizeof(uint64_t),
91 : .align = __alignof__(uint64_t),
92 : .flags = 0,
93 : };
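/* Usage sketch (mirrors the code below; g_mbuf_offset is filled in by
 * rte_mbuf_dynfield_register() in accel_dpdk_cryptodev_init()):
 *
 *   store: *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
 *   load:  task = (struct accel_dpdk_cryptodev_task *)
 *                 *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *);
 */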
94 :
95 : struct accel_dpdk_cryptodev_device;
96 :
97 : enum accel_dpdk_cryptodev_driver_type {
98 : ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0,
99 : ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
100 : ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
101 : ACCEL_DPDK_CRYPTODEV_DRIVER_LAST
102 : };
103 :
104 : struct accel_dpdk_cryptodev_qp {
105 : struct accel_dpdk_cryptodev_device *device; /* ptr to crypto device */
106 : uint32_t num_enqueued_ops; /* Used to decide whether to poll the qp or not */
107 : uint8_t qp; /* queue identifier */
108 : bool in_use; /* whether this node is in use or not */
109 : uint8_t index; /* used by QAT to load balance placement of qpairs */
110 : TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link;
111 : };
112 :
113 : struct accel_dpdk_cryptodev_device {
114 : enum accel_dpdk_cryptodev_driver_type type;
115 : struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */
116 : uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */
117 : uint8_t cdev_id; /* identifier for the device */
118 : TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs;
119 : TAILQ_ENTRY(accel_dpdk_cryptodev_device) link;
120 : };
121 :
122 : struct accel_dpdk_cryptodev_key_handle {
123 : struct accel_dpdk_cryptodev_device *device;
124 : TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link;
125 : void *session_encrypt; /* encryption session for this key */
126 : void *session_decrypt; /* decryption session for this key */
127 : struct rte_crypto_sym_xform cipher_xform; /* crypto control struct for this key */
128 : };
129 :
130 : struct accel_dpdk_cryptodev_key_priv {
131 : enum accel_dpdk_cryptodev_driver_type driver;
132 : enum spdk_accel_cipher cipher;
133 : char *xts_key;
134 : TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
135 : };
136 :
137 : /* The crypto channel struct. It is allocated and freed on our behalf by the io channel code.
138 : * We store things in here that are needed on a per-thread basis, like the base_channel for this thread
139 : * and the poller for this thread.
140 : */
141 : struct accel_dpdk_cryptodev_io_channel {
142 : /* completion poller */
143 : struct spdk_poller *poller;
144 : /* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
145 : struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
146 : /* Used to queue tasks when the qpair is full or only part of the crypto ops were submitted to the PMD */
147 : TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks;
148 : /* Used to queue tasks that were completed in the submission path - to avoid calling cpl_cb and possibly
149 : * overflowing the call stack */
150 : TAILQ_HEAD(, accel_dpdk_cryptodev_task) completed_tasks;
151 : };
152 :
153 : struct accel_dpdk_cryptodev_task {
154 : struct spdk_accel_task base;
155 : uint32_t cryop_completed; /* The number of crypto operations completed by HW */
156 : uint32_t cryop_submitted; /* The number of crypto operations submitted to HW */
157 : uint32_t cryop_total; /* Total number of crypto operations in this task */
158 : bool is_failed;
159 : bool inplace;
160 : TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
161 : };
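/* Accounting note (not in the original source): the code below maintains
 * cryop_completed <= cryop_submitted <= cryop_total. The task completes when
 * cryop_completed == cryop_total; if cryop_completed == cryop_submitted while
 * blocks remain, accel_dpdk_cryptodev_process_task() is called again to submit
 * the rest. */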
162 :
163 : /* Shared mempools between all devices on this system */
164 : static struct rte_mempool *g_session_mp = NULL;
165 : static struct rte_mempool *g_session_mp_priv = NULL;
166 : static struct rte_mempool *g_mbuf_mp = NULL; /* mbuf mempool */
167 : static int g_mbuf_offset;
168 : static struct rte_mempool *g_crypto_op_mp = NULL; /* crypto operations, must be rte* mempool */
169 :
170 : static struct rte_mbuf_ext_shared_info g_shinfo = {}; /* used by DPDK mbuf macro */
171 :
172 : static uint8_t g_qat_total_qp = 0;
173 : static uint8_t g_next_qat_index;
174 :
175 : static const char *g_driver_names[] = {
176 : [ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = ACCEL_DPDK_CRYPTODEV_AESNI_MB,
177 : [ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = ACCEL_DPDK_CRYPTODEV_QAT,
178 : [ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = ACCEL_DPDK_CRYPTODEV_MLX5
179 : };
180 : static const char *g_cipher_names[] = {
181 : [SPDK_ACCEL_CIPHER_AES_CBC] = ACCEL_DPDK_CRYPTODEV_AES_CBC,
182 : [SPDK_ACCEL_CIPHER_AES_XTS] = ACCEL_DPDK_CRYPTODEV_AES_XTS,
183 : };
184 :
185 : static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver =
186 : ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
187 :
188 : /* Global list of all crypto devices */
189 : static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER(
190 : g_crypto_devices);
191 : static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;
192 :
193 : static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;
194 :
195 : static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
196 : struct accel_dpdk_cryptodev_task *task);
197 :
198 : void
199 0 : accel_dpdk_cryptodev_enable(void)
200 : {
201 0 : spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module);
202 0 : }
203 :
204 : int
205 0 : accel_dpdk_cryptodev_set_driver(const char *driver_name)
206 : {
207 0 : if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
208 0 : g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
209 0 : } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
210 0 : g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
211 0 : } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
212 0 : g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
213 0 : } else {
214 0 : SPDK_ERRLOG("Unsupported driver %s\n", driver_name);
215 0 : return -EINVAL;
216 : }
217 :
218 0 : SPDK_NOTICELOG("Using driver %s\n", driver_name);
219 :
220 0 : return 0;
221 0 : }
222 :
223 : const char *
224 0 : accel_dpdk_cryptodev_get_driver(void)
225 : {
226 0 : return g_driver_names[g_dpdk_cryptodev_driver];
227 : }
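/* Usage sketch (hypothetical caller such as an RPC handler; error handling
 * elided) showing how a driver is selected before the module is registered:
 *
 *   if (accel_dpdk_cryptodev_set_driver(ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
 *           accel_dpdk_cryptodev_enable();
 *   }
 */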
228 :
229 : static inline uint16_t
230 5 : accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
231 : struct accel_dpdk_cryptodev_io_channel *crypto_ch)
232 : {
233 5 : struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
234 5 : struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
235 5 : struct accel_dpdk_cryptodev_task *task;
236 5 : uint32_t num_mbufs = 0;
237 5 : int i;
238 5 : uint16_t num_dequeued_ops;
239 :
240 : /* Each run of the poller will get just what the device has available
241 : * at the moment we call it; we don't check again after draining the
242 : * first batch.
243 : */
244 5 : num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp,
245 5 : dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE);
246 : /* Check if operation was processed successfully */
247 9 : for (i = 0; i < num_dequeued_ops; i++) {
248 :
249 : /* We don't know the order or association of the crypto ops wrt any
250 : * particular task, so we need to look at each one and determine if it's
251 : * the last one for its task or not.
252 : */
253 4 : task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src,
254 : g_mbuf_offset, uint64_t *);
255 4 : assert(task != NULL);
256 :
257 4 : if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
258 1 : SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status);
259 : /* Update the task status to error, we'll still process the
260 : * rest of the crypto ops for this task though so they
261 : * aren't left hanging.
262 : */
263 1 : task->is_failed = true;
264 1 : }
265 :
266 : /* Return the associated src and dst mbufs by collecting them into
267 : * an array that we can use the bulk API to free after the loop.
268 : */
269 4 : *RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
270 4 : mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
271 4 : if (dequeued_ops[i]->sym->m_dst) {
272 0 : mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
273 0 : }
274 :
275 4 : task->cryop_completed++;
276 4 : if (task->cryop_completed == task->cryop_total) {
277 : /* Complete the IO */
278 2 : spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
279 4 : } else if (task->cryop_completed == task->cryop_submitted) {
280 : /* submit remaining crypto ops */
281 1 : int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
282 :
283 1 : if (spdk_unlikely(rc)) {
284 0 : if (rc == -ENOMEM) {
285 0 : TAILQ_INSERT_TAIL(&crypto_ch->queued_tasks, task, link);
286 0 : continue;
287 0 : } else if (rc == -EALREADY) {
288 : /* -EALREADY means that a task is completed, but it might be unsafe to complete
289 : * it if we are in the submission path. Since we are in the poller context, we can
290 : * complete th task immediately */
291 0 : rc = 0;
292 0 : }
293 0 : spdk_accel_task_complete(&task->base, rc);
294 0 : }
295 1 : }
296 4 : }
297 :
298 : /* Now bulk free both mbufs and crypto operations. */
299 5 : if (num_dequeued_ops > 0) {
300 3 : rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops);
301 3 : assert(num_mbufs > 0);
302 : /* This also releases chained mbufs if any. */
303 3 : rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
304 3 : }
305 :
306 5 : assert(qp->num_enqueued_ops >= num_dequeued_ops);
307 5 : qp->num_enqueued_ops -= num_dequeued_ops;
308 :
309 10 : return num_dequeued_ops;
310 5 : }
311 :
312 : /* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at
313 : * the device. Then we need to decide if what we've got so far (including previous poller
314 : * runs) totals up to one or more complete tasks */
315 : static int
316 6 : accel_dpdk_cryptodev_poller(void *args)
317 : {
318 6 : struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
319 6 : struct accel_dpdk_cryptodev_qp *qp;
320 6 : struct accel_dpdk_cryptodev_task *task, *task_tmp;
321 6 : TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp;
322 6 : uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0, num_completed_tasks = 0;
323 6 : int i, rc;
324 :
325 24 : for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
326 18 : qp = crypto_ch->device_qp[i];
327 : /* Avoid polling "idle" qps since it may affect performance */
328 18 : if (qp && qp->num_enqueued_ops) {
329 5 : num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch);
330 5 : }
331 18 : }
332 :
333 6 : if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) {
334 2 : TAILQ_INIT(&queued_tasks_tmp);
335 :
336 4 : TAILQ_FOREACH_SAFE(task, &crypto_ch->queued_tasks, link, task_tmp) {
337 2 : TAILQ_REMOVE(&crypto_ch->queued_tasks, task, link);
338 2 : rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
339 2 : if (spdk_unlikely(rc)) {
340 1 : if (rc == -ENOMEM) {
341 1 : TAILQ_INSERT_TAIL(&queued_tasks_tmp, task, link);
342 : /* Other queued tasks may belong to other qpairs,
343 : * so process the whole list */
344 1 : continue;
345 0 : } else if (rc == -EALREADY) {
346 : /* -EALREADY means that a task is completed, but it might be unsafe to complete
347 : * it if we are in the submission path. Since we are in the poller context, we can
348 : * complete the task immediately */
349 0 : rc = 0;
350 0 : }
351 0 : spdk_accel_task_complete(&task->base, rc);
352 0 : num_completed_tasks++;
353 0 : } else {
354 1 : num_enqueued_ops++;
355 : }
356 1 : }
357 :
358 2 : TAILQ_SWAP(&crypto_ch->queued_tasks, &queued_tasks_tmp, accel_dpdk_cryptodev_task, link);
359 2 : }
360 :
361 7 : TAILQ_FOREACH_SAFE(task, &crypto_ch->completed_tasks, link, task_tmp) {
362 1 : TAILQ_REMOVE(&crypto_ch->completed_tasks, task, link);
363 1 : spdk_accel_task_complete(&task->base, 0);
364 1 : num_completed_tasks++;
365 1 : }
366 :
367 12 : return !!(num_dequeued_ops + num_enqueued_ops + num_completed_tasks);
368 6 : }
369 :
370 : /* Allocate a new mbuf of @remainder size with data pointed to by @addr and attach
371 : * it to the @orig_mbuf. */
372 : static inline int
373 12 : accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task,
374 : struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder)
375 : {
376 12 : uint64_t phys_addr, phys_len, remainder = *_remainder;
377 12 : struct rte_mbuf *chain_mbuf;
378 12 : int rc;
379 :
380 12 : phys_len = remainder;
381 12 : phys_addr = spdk_vtophys((void *)addr, &phys_len);
382 12 : if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
383 0 : return -EFAULT;
384 : }
385 12 : remainder = spdk_min(remainder, phys_len);
386 12 : remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
387 12 : rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
388 12 : if (spdk_unlikely(rc)) {
389 0 : return -ENOMEM;
390 : }
391 : /* Store context in every mbuf as we don't know anything about completion order */
392 12 : *RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
393 12 : rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo);
394 12 : rte_pktmbuf_append(chain_mbuf, remainder);
395 :
396 : /* Chained buffer is released by rte_pktmbuf_free_bulk() automatically. */
397 12 : rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
398 12 : *_remainder = remainder;
399 :
400 12 : return 0;
401 12 : }
402 :
403 : /* Attach the data buffer pointed to by @addr to @mbuf. Return the utilized length of the
404 : * contiguous space that was physically available. */
405 : static inline uint64_t
406 1574 : accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf,
407 : uint8_t *addr, uint32_t len)
408 : {
409 1574 : uint64_t phys_addr, phys_len;
410 :
411 : /* Store context in every mbuf as we don't know anything about completion order */
412 1574 : *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
413 :
414 1574 : phys_len = len;
415 1574 : phys_addr = spdk_vtophys((void *)addr, &phys_len);
416 1574 : if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
417 1 : return 0;
418 : }
419 1573 : assert(phys_len <= len);
420 1573 : phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
421 :
422 : /* Set the mbuf elements address and length. */
423 1573 : rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
424 1573 : rte_pktmbuf_append(mbuf, phys_len);
425 :
426 1573 : return phys_len;
427 1574 : }
428 :
429 : static inline struct accel_dpdk_cryptodev_key_handle *
430 30 : accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
431 : struct accel_dpdk_cryptodev_key_priv *key)
432 : {
433 30 : struct accel_dpdk_cryptodev_key_handle *key_handle;
434 :
435 30 : if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
436 : /* The crypto key is registered on all available devices, while the io_channel opens a CQ/QP on a single device.
437 : * We need to iterate the list of key entries to find a suitable device */
438 0 : TAILQ_FOREACH(key_handle, &key->dev_keys, link) {
439 0 : if (key_handle->device->cdev_id ==
440 0 : crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) {
441 0 : return key_handle;
442 : }
443 0 : }
444 0 : return NULL;
445 : } else {
446 30 : return TAILQ_FIRST(&key->dev_keys);
447 : }
448 30 : }
449 :
450 : static inline int
451 28 : accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
452 : struct rte_crypto_op **crypto_ops, int count)
453 : {
454 28 : int rc;
455 :
456 : /* Get the number of source mbufs that we need. These will always be 1:1 because we
457 : * don't support chaining. The reason we don't is our decision to use the
458 : * LBA as the IV: there can be no case where we'd need >1 mbuf per crypto op, or where the
459 : * op would be > 1 LBA.
460 : */
461 28 : rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count);
462 28 : if (rc) {
463 1 : SPDK_ERRLOG("Failed to get src_mbufs!\n");
464 1 : return -ENOMEM;
465 : }
466 :
467 : /* Get the same amount to describe the destination. If the crypto operation is inplace, dst_mbufs is NULL and we just skip this */
468 27 : if (dst_mbufs) {
469 8 : rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count);
470 8 : if (rc) {
471 0 : SPDK_ERRLOG("Failed to get dst_mbufs!\n");
472 0 : goto err_free_src;
473 : }
474 8 : }
475 :
476 : #ifdef __clang_analyzer__
477 : /* silence scan-build false positive */
478 : SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE,
479 : 0x1000);
480 : #endif
481 : /* Allocate crypto operations. */
482 54 : rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
483 : RTE_CRYPTO_OP_TYPE_SYMMETRIC,
484 27 : crypto_ops, count);
485 27 : if (rc < count) {
486 0 : SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc);
487 0 : goto err_free_ops;
488 : }
489 :
490 27 : return 0;
491 :
492 : err_free_ops:
493 0 : if (rc > 0) {
494 0 : rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc);
495 0 : }
496 0 : if (dst_mbufs) {
497 : /* This also releases chained mbufs if any. */
498 0 : rte_pktmbuf_free_bulk(dst_mbufs, count);
499 0 : }
500 : err_free_src:
501 : /* This also releases chained mbufs if any. */
502 0 : rte_pktmbuf_free_bulk(src_mbufs, count);
503 :
504 0 : return -ENOMEM;
505 28 : }
506 :
507 : static inline int
508 1574 : accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf,
509 : struct accel_dpdk_cryptodev_task *task)
510 : {
511 1574 : int rc;
512 1574 : uint8_t *buf_addr;
513 1574 : uint64_t phys_len;
514 1574 : uint64_t remainder;
515 1574 : uint64_t buf_len;
516 :
517 1574 : assert(sgl->iov->iov_len > sgl->iov_offset);
518 1574 : buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset);
519 1574 : buf_addr = sgl->iov->iov_base + sgl->iov_offset;
520 1574 : phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len);
521 1574 : if (spdk_unlikely(phys_len == 0)) {
522 1 : return -EFAULT;
523 : }
524 1573 : buf_len = spdk_min(buf_len, phys_len);
525 1573 : spdk_iov_sgl_advance(sgl, buf_len);
526 :
527 : /* Handle the case of page boundary. */
528 1573 : assert(task->base.block_size >= buf_len);
529 1573 : remainder = task->base.block_size - buf_len;
530 1585 : while (remainder) {
531 12 : buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset);
532 12 : buf_addr = sgl->iov->iov_base + sgl->iov_offset;
533 12 : rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len);
534 12 : if (spdk_unlikely(rc)) {
535 0 : return rc;
536 : }
537 12 : spdk_iov_sgl_advance(sgl, buf_len);
538 12 : remainder -= buf_len;
539 : }
540 :
541 1573 : return 0;
542 1574 : }
543 :
544 : static inline void
545 1057 : accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv)
546 : {
547 1057 : uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
548 :
549 : /* Set the IV - we use the LBA of the crypto_op */
550 1057 : memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH);
551 1057 : rte_memcpy(iv_ptr, &iv, sizeof(uint64_t));
552 1057 : }
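/* Example: a task with base.iv == 100 spanning 4 blocks produces crypto ops
 * with IVs 100, 101, 102, 103 (see the iv_start++ loop in
 * accel_dpdk_cryptodev_process_task()); the remaining bytes of the 16-byte IV
 * stay zeroed. */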
553 :
554 : static inline void
555 3 : accel_dpdk_cryptodev_update_resources_from_pools(struct rte_crypto_op **crypto_ops,
556 : struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
557 : uint32_t num_enqueued_ops, uint32_t cryop_cnt)
558 : {
559 3 : memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt);
560 3 : memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt);
561 3 : if (dst_mbufs) {
562 0 : memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt);
563 0 : }
564 3 : }
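/* Worked example (illustrative): if 8 crypto ops were allocated but only 3
 * were enqueued, the caller passes cryop_cnt == 5 and this helper shifts
 * elements [3..7] of each array to the front so the unsubmitted resources can
 * be released with one bulk call. */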
565 :
566 : static int
567 34 : accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
568 : struct accel_dpdk_cryptodev_task *task)
569 : {
570 34 : uint16_t num_enqueued_ops;
571 34 : uint32_t cryop_cnt;
572 34 : uint32_t crypto_len = task->base.block_size;
573 34 : uint64_t dst_length, total_length;
574 34 : uint32_t sgl_offset;
575 34 : uint32_t qp_capacity;
576 34 : uint64_t iv_start;
577 34 : uint32_t i, crypto_index;
578 34 : struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
579 34 : struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
580 34 : struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
581 34 : void *session;
582 34 : struct accel_dpdk_cryptodev_key_priv *priv;
583 34 : struct accel_dpdk_cryptodev_key_handle *key_handle;
584 34 : struct accel_dpdk_cryptodev_qp *qp;
585 34 : struct accel_dpdk_cryptodev_device *dev;
586 34 : struct spdk_iov_sgl src, dst = {};
587 34 : int rc;
588 34 : bool inplace = task->inplace;
589 :
590 34 : if (spdk_unlikely(!task->base.crypto_key ||
591 : task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) {
592 2 : return -EINVAL;
593 : }
594 :
595 32 : priv = task->base.crypto_key->priv;
596 32 : assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);
597 :
598 32 : if (task->cryop_completed) {
599 : /* We continue to process remaining blocks */
600 7 : assert(task->cryop_submitted == task->cryop_completed);
601 7 : assert(task->cryop_total > task->cryop_completed);
602 7 : cryop_cnt = task->cryop_total - task->cryop_completed;
603 7 : sgl_offset = task->cryop_completed * crypto_len;
604 7 : iv_start = task->base.iv + task->cryop_completed;
605 7 : } else {
606 : /* That is a new task */
607 25 : total_length = 0;
608 121 : for (i = 0; i < task->base.s.iovcnt; i++) {
609 96 : total_length += task->base.s.iovs[i].iov_len;
610 96 : }
611 25 : dst_length = 0;
612 115 : for (i = 0; i < task->base.d.iovcnt; i++) {
613 90 : dst_length += task->base.d.iovs[i].iov_len;
614 90 : }
615 :
616 25 : if (spdk_unlikely(total_length != dst_length || !total_length)) {
617 0 : return -ERANGE;
618 : }
619 25 : if (spdk_unlikely(total_length % task->base.block_size != 0)) {
620 0 : return -EINVAL;
621 : }
622 :
623 25 : cryop_cnt = total_length / task->base.block_size;
624 25 : task->cryop_total = cryop_cnt;
625 25 : sgl_offset = 0;
626 25 : iv_start = task->base.iv;
627 : }
628 :
629 : /* Limit the number of crypto ops that we can process once */
630 32 : cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
631 :
632 32 : qp = crypto_ch->device_qp[priv->driver];
633 32 : assert(qp);
634 32 : dev = qp->device;
635 32 : assert(dev);
636 32 : assert(dev->qp_desc_nr >= qp->num_enqueued_ops);
637 :
638 32 : qp_capacity = dev->qp_desc_nr - qp->num_enqueued_ops;
639 32 : cryop_cnt = spdk_min(cryop_cnt, qp_capacity);
640 32 : if (spdk_unlikely(cryop_cnt == 0)) {
641 : /* QP is full */
642 2 : return -ENOMEM;
643 : }
644 :
645 30 : key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv);
646 30 : if (spdk_unlikely(!key_handle)) {
647 1 : SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver],
648 : g_cipher_names[priv->cipher]);
649 1 : return -EINVAL;
650 : }
651 : /* mlx5_pci binds keys to a specific device, we can't use a key with any device */
652 29 : assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
653 :
654 29 : if (task->base.op_code == SPDK_ACCEL_OPC_ENCRYPT) {
655 11 : session = key_handle->session_encrypt;
656 29 : } else if (task->base.op_code == SPDK_ACCEL_OPC_DECRYPT) {
657 17 : session = key_handle->session_decrypt;
658 17 : } else {
659 1 : return -EINVAL;
660 : }
661 :
662 28 : rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs,
663 28 : crypto_ops, cryop_cnt);
664 28 : if (rc) {
665 1 : return rc;
666 : }
667 :
668 : /* As we don't support chaining because of a decision to use LBA as IV, construction
669 : * of crypto operations is straightforward. We build both the op, the mbuf and the
670 : * dst_mbuf in our local arrays by looping through the length of the accel task and
671 : * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
672 : * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
673 : * mbuf per crypto operation.
674 : */
675 27 : spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
676 27 : spdk_iov_sgl_advance(&src, sgl_offset);
677 27 : if (!inplace) {
678 8 : spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
679 8 : spdk_iov_sgl_advance(&dst, sgl_offset);
680 8 : }
681 :
682 1084 : for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
683 1058 : rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
684 1058 : if (spdk_unlikely(rc)) {
685 1 : goto free_ops;
686 : }
687 1057 : accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
688 1057 : iv_start++;
689 :
690 : /* Set the data to encrypt/decrypt length */
691 1057 : crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
692 1057 : crypto_ops[crypto_index]->sym->cipher.data.offset = 0;
693 1057 : rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session);
694 :
695 : /* link the mbuf to the crypto op. */
696 1057 : crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
697 :
698 1057 : if (inplace) {
699 541 : crypto_ops[crypto_index]->sym->m_dst = NULL;
700 541 : } else {
701 : #ifndef __clang_analyzer__
702 : /* scan-build thinks that dst_mbufs is not initialized */
703 516 : rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
704 516 : if (spdk_unlikely(rc)) {
705 0 : goto free_ops;
706 : }
707 516 : crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
708 : #endif
709 : }
710 1057 : }
711 :
712 : /* Enqueue everything we've got but limit by the max number of descriptors we
713 : * configured the crypto device for.
714 : */
715 26 : num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt);
716 : /* This value is used in the completion callback to determine when the accel task is complete. */
717 26 : task->cryop_submitted += num_enqueued_ops;
718 26 : qp->num_enqueued_ops += num_enqueued_ops;
719 : /* We were unable to enqueue everything but may have gotten some, so we need to decide what
720 : * to do based on the status of the last op.
721 : */
722 26 : if (num_enqueued_ops < cryop_cnt) {
723 6 : switch (crypto_ops[num_enqueued_ops]->status) {
724 : case RTE_CRYPTO_OP_STATUS_SUCCESS:
725 : /* A crypto operation might be completed successfully, but enqueuing it to a completion ring might fail.
726 : * That might happen with SW PMDs like openssl.
727 : * We can't retry such an operation on the next turn since, if the crypto operation was inplace, we could encrypt/
728 : * decrypt an already processed buffer. See github issue #2907 for more details.
729 : * Handle this case as if the crypto op was completed successfully - increment cryop_submitted and
730 : * cryop_completed.
731 : * We won't receive a completion for such an operation, so we need to clean up the mbufs and crypto_ops */
732 3 : assert(task->cryop_total > task->cryop_completed);
733 3 : task->cryop_completed++;
734 3 : task->cryop_submitted++;
735 3 : if (task->cryop_completed == task->cryop_total) {
736 1 : assert(num_enqueued_ops == 0);
737 : /* All crypto ops are completed. We can't complete the task immediately since this function might be
738 : * called in the scope of an spdk_accel_submit_* function, and the user's logic in the completion callback
739 : * might lead to stack overflow */
740 1 : cryop_cnt -= num_enqueued_ops;
741 1 : accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
742 1 : num_enqueued_ops, cryop_cnt);
743 1 : rc = -EALREADY;
744 1 : goto free_ops;
745 : }
746 : /* fallthrough */
747 : case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
748 4 : if (num_enqueued_ops == 0) {
749 : /* Nothing was submitted. Free crypto ops and mbufs, treat this case as NOMEM */
750 2 : rc = -ENOMEM;
751 2 : goto free_ops;
752 : }
753 : /* Part of the crypto operations were not submitted; release their mbufs and crypto ops.
754 : * The rest of the crypto ops will be submitted again once the current batch is completed */
755 2 : cryop_cnt -= num_enqueued_ops;
756 2 : accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
757 2 : num_enqueued_ops, cryop_cnt);
758 2 : rc = 0;
759 2 : goto free_ops;
760 : default:
761 : /* For all other statuses, mark task as failed so that the poller will pick
762 : * the failure up for the overall task status.
763 : */
764 1 : task->is_failed = true;
765 1 : if (num_enqueued_ops == 0) {
766 : /* If nothing was enqueued, but the last one wasn't because of
767 : * busy, fail it now as the poller won't know anything about it.
768 : */
769 1 : rc = -EINVAL;
770 1 : goto free_ops;
771 : }
772 0 : break;
773 : }
774 0 : }
775 :
776 20 : return 0;
777 :
778 : /* Error cleanup paths. */
779 : free_ops:
780 7 : if (!inplace) {
781 : /* This also releases chained mbufs if any. */
782 0 : rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
783 0 : }
784 7 : rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt);
785 : /* This also releases chained mbufs if any. */
786 7 : rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
787 7 : return rc;
788 34 : }
789 :
790 : static inline struct accel_dpdk_cryptodev_qp *
791 8 : accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type)
792 : {
793 8 : struct accel_dpdk_cryptodev_device *device, *device_tmp;
794 8 : struct accel_dpdk_cryptodev_qp *qpair;
795 :
796 20 : TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) {
797 20 : if (device->type != type) {
798 12 : continue;
799 : }
800 20 : TAILQ_FOREACH(qpair, &device->qpairs, link) {
801 20 : if (!qpair->in_use) {
802 8 : qpair->in_use = true;
803 8 : return qpair;
804 : }
805 12 : }
806 0 : }
807 :
808 0 : return NULL;
809 8 : }
810 :
811 : /* Helper function for the channel creation callback.
812 : * Returns the number of drivers assigned to the channel */
813 : static uint32_t
814 4 : accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch)
815 : {
816 4 : struct accel_dpdk_cryptodev_device *device;
817 4 : struct accel_dpdk_cryptodev_qp *device_qp;
818 4 : uint32_t num_drivers = 0;
819 4 : bool qat_found = false;
820 :
821 4 : pthread_mutex_lock(&g_device_lock);
822 :
823 16 : TAILQ_FOREACH(device, &g_crypto_devices, link) {
824 12 : if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) {
825 : /* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
826 : * workload out over the multiple virtual functions in the device. For the devices
827 : * where this isn't the case, it doesn't hurt.
828 : */
829 101 : TAILQ_FOREACH(device_qp, &device->qpairs, link) {
830 101 : if (device_qp->index != g_next_qat_index) {
831 96 : continue;
832 : }
833 5 : if (device_qp->in_use == false) {
834 4 : assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL);
835 4 : crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp;
836 4 : device_qp->in_use = true;
837 4 : g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp;
838 4 : qat_found = true;
839 4 : num_drivers++;
840 4 : break;
841 : } else {
842 : /* if the preferred index is used, skip to the next one in this set. */
843 1 : g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
844 : }
845 1 : }
846 4 : }
847 12 : }
848 :
849 : /* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI select devices in round-robin manner */
850 4 : device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB);
851 4 : if (device_qp) {
852 4 : assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL);
853 4 : crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp;
854 4 : num_drivers++;
855 4 : }
856 :
857 4 : device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
858 4 : if (device_qp) {
859 4 : assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL);
860 4 : crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp;
861 4 : num_drivers++;
862 4 : }
863 :
864 4 : pthread_mutex_unlock(&g_device_lock);
865 :
866 8 : return num_drivers;
867 4 : }
868 :
869 : static void
870 0 : _accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf)
871 : {
872 0 : struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
873 0 : ctx_buf;
874 0 : int i;
875 :
876 0 : pthread_mutex_lock(&g_device_lock);
877 0 : for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
878 0 : if (crypto_ch->device_qp[i]) {
879 0 : crypto_ch->device_qp[i]->in_use = false;
880 0 : }
881 0 : }
882 0 : pthread_mutex_unlock(&g_device_lock);
883 :
884 0 : spdk_poller_unregister(&crypto_ch->poller);
885 0 : }
886 :
887 : static int
888 0 : _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
889 : {
890 0 : struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
891 0 : ctx_buf;
892 :
893 0 : crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0);
894 0 : if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) {
895 0 : SPDK_ERRLOG("No crypto drivers assigned\n");
896 0 : spdk_poller_unregister(&crypto_ch->poller);
897 0 : return -EINVAL;
898 : }
899 :
900 : /* We use this to queue tasks when the qpair is full or there are no resources in the pools */
901 0 : TAILQ_INIT(&crypto_ch->queued_tasks);
902 0 : TAILQ_INIT(&crypto_ch->completed_tasks);
903 :
904 0 : return 0;
905 0 : }
906 :
907 : static struct spdk_io_channel *
908 0 : accel_dpdk_cryptodev_get_io_channel(void)
909 : {
910 0 : return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module);
911 : }
912 :
913 : static size_t
914 0 : accel_dpdk_cryptodev_ctx_size(void)
915 : {
916 0 : return sizeof(struct accel_dpdk_cryptodev_task);
917 : }
918 :
919 : static bool
920 14 : accel_dpdk_cryptodev_supports_opcode(enum spdk_accel_opcode opc)
921 : {
922 14 : switch (opc) {
923 : case SPDK_ACCEL_OPC_ENCRYPT:
924 : case SPDK_ACCEL_OPC_DECRYPT:
925 2 : return true;
926 : default:
927 12 : return false;
928 : }
929 14 : }
930 :
931 : static int
932 27 : accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task)
933 : {
934 27 : struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task,
935 : base);
936 27 : struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch);
937 27 : int rc;
938 :
939 27 : task->cryop_completed = 0;
940 27 : task->cryop_submitted = 0;
941 27 : task->cryop_total = 0;
942 27 : task->inplace = true;
943 27 : task->is_failed = false;
944 :
945 : /* Check if crypto operation is inplace: no destination or source == destination */
946 27 : if (task->base.s.iovcnt == task->base.d.iovcnt) {
947 25 : if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
948 4 : task->inplace = false;
949 4 : }
950 27 : } else if (task->base.d.iovcnt != 0) {
951 2 : task->inplace = false;
952 2 : }
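/* Example (illustrative): identical s.iovs and d.iovs arrays keep
 * inplace == true, so m_dst stays NULL; a distinct destination (or a
 * non-zero d.iovcnt that differs from s.iovcnt) makes the operation
 * out-of-place and dst_mbufs are allocated in
 * accel_dpdk_cryptodev_process_task(). */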
953 :
954 27 : rc = accel_dpdk_cryptodev_process_task(ch, task);
955 27 : if (spdk_unlikely(rc)) {
956 11 : if (rc == -ENOMEM) {
957 4 : TAILQ_INSERT_TAIL(&ch->queued_tasks, task, link);
958 4 : rc = 0;
959 11 : } else if (rc == -EALREADY) {
960 : /* -EALREADY means that a task is completed, but it might be unsafe to complete
961 : * it if we are in the submission path. Hence put it into a dedicated queue and
962 : * process it during polling */
963 1 : TAILQ_INSERT_TAIL(&ch->completed_tasks, task, link);
964 1 : rc = 0;
965 1 : }
966 11 : }
967 :
968 54 : return rc;
969 27 : }
970 :
971 : /* Dummy function used by DPDK to free ext-attached buffers to mbufs; we free them ourselves, but
972 : * this callback has to be here. */
973 : static void
974 0 : shinfo_free_cb(void *arg1, void *arg2)
975 : {
976 0 : }
977 :
978 : static int
979 13 : accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores)
980 : {
981 26 : struct rte_cryptodev_qp_conf qp_conf = {
982 13 : .mp_session = g_session_mp,
983 : #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
984 : .mp_session_private = g_session_mp_priv
985 : #endif
986 : };
987 : /* Setup queue pairs. */
988 13 : struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY };
989 13 : struct accel_dpdk_cryptodev_device *device;
990 13 : uint8_t j, cdev_id, cdrv_id;
991 13 : struct accel_dpdk_cryptodev_qp *dev_qp;
992 13 : int rc;
993 :
994 13 : device = calloc(1, sizeof(*device));
995 13 : if (!device) {
996 0 : return -ENOMEM;
997 : }
998 :
999 : /* Get details about this device. */
1000 13 : rte_cryptodev_info_get(index, &device->cdev_info);
1001 13 : cdrv_id = device->cdev_info.driver_id;
1002 13 : cdev_id = device->cdev_id = index;
1003 :
1004 13 : if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
1005 4 : device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
1006 4 : device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
1007 13 : } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
1008 6 : device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
1009 6 : device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
1010 9 : } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
1011 2 : device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5;
1012 2 : device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
1013 3 : } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) {
1014 : /* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */
1015 0 : rc = 0;
1016 0 : goto err;
1017 : } else {
1018 1 : SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n",
1019 : cdev_id, device->cdev_info.driver_name);
1020 1 : rc = -EINVAL;
1021 1 : goto err;
1022 : }
1023 :
1024 : /* Before going any further, make sure we have enough resources for this
1025 : * device type to function. We need a unique queue pair per core across each
1026 : * device type to remain lockless....
1027 : */
1028 36 : if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
1029 12 : device->cdev_info.max_nb_queue_pairs) < num_lcores) {
1030 1 : SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
1031 : device->cdev_info.driver_name);
1032 1 : SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
1033 1 : rc = -EINVAL;
1034 1 : goto err;
1035 : }
1036 :
1037 11 : conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs;
1038 11 : rc = rte_cryptodev_configure(cdev_id, &conf);
1039 11 : if (rc < 0) {
1040 1 : SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n",
1041 : cdev_id, rc);
1042 1 : rc = -EINVAL;
1043 1 : goto err;
1044 : }
1045 :
1046 : /* Pre-setup all potential qpairs now and assign them in the channel
1047 : * callback. If we were to create them there, we'd have to stop the
1048 : * entire device, affecting all other threads that might be using it,
1049 : * even on other queue pairs.
1050 : */
1051 10 : qp_conf.nb_descriptors = device->qp_desc_nr;
1052 19 : for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1053 10 : rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
1054 10 : if (rc < 0) {
1055 1 : SPDK_ERRLOG("Failed to setup queue pair %u on "
1056 : "cryptodev %u: error %d\n", j, cdev_id, rc);
1057 1 : rc = -EINVAL;
1058 1 : goto err_qp_setup;
1059 : }
1060 9 : }
1061 :
1062 9 : rc = rte_cryptodev_start(cdev_id);
1063 9 : if (rc < 0) {
1064 1 : SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc);
1065 1 : rc = -EINVAL;
1066 1 : goto err_dev_start;
1067 : }
1068 :
1069 8 : TAILQ_INIT(&device->qpairs);
1070 : /* Build up lists of device/qp combinations per PMD */
1071 16 : for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1072 8 : dev_qp = calloc(1, sizeof(*dev_qp));
1073 8 : if (!dev_qp) {
1074 0 : rc = -ENOMEM;
1075 0 : goto err_qp_alloc;
1076 : }
1077 8 : dev_qp->device = device;
1078 8 : dev_qp->qp = j;
1079 8 : dev_qp->in_use = false;
1080 8 : TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link);
1081 8 : if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1082 4 : dev_qp->index = g_qat_total_qp++;
1083 4 : }
1084 8 : }
1085 : /* Add to our list of available crypto devices. */
1086 8 : TAILQ_INSERT_TAIL(&g_crypto_devices, device, link);
1087 :
1088 8 : return 0;
1089 :
1090 : err_qp_alloc:
1091 0 : TAILQ_FOREACH(dev_qp, &device->qpairs, link) {
1092 0 : if (dev_qp->device->cdev_id != device->cdev_id) {
1093 0 : continue;
1094 : }
1095 0 : free(dev_qp);
1096 0 : if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1097 0 : assert(g_qat_total_qp);
1098 0 : g_qat_total_qp--;
1099 0 : }
1100 0 : }
1101 1 : rte_cryptodev_stop(cdev_id);
1102 : err_dev_start:
1103 : err_qp_setup:
1104 2 : rte_cryptodev_close(cdev_id);
1105 : err:
1106 5 : free(device);
1107 :
1108 5 : return rc;
1109 13 : }
1110 :
1111 : static void
1112 8 : accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device)
1113 : {
1114 8 : struct accel_dpdk_cryptodev_qp *dev_qp, *tmp;
1115 :
1116 8 : assert(device);
1117 :
1118 16 : TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) {
1119 8 : free(dev_qp);
1120 8 : }
1121 8 : if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1122 4 : assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs);
1123 4 : g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs;
1124 4 : }
1125 8 : rte_cryptodev_stop(device->cdev_id);
1126 8 : rte_cryptodev_close(device->cdev_id);
1127 8 : free(device);
1128 8 : }
1129 :
1130 : static int
1131 12 : accel_dpdk_cryptodev_init(void)
1132 : {
1133 12 : uint8_t cdev_count;
1134 12 : uint8_t cdev_id;
1135 12 : int i, rc;
1136 12 : struct accel_dpdk_cryptodev_device *device, *tmp_dev;
1137 12 : unsigned int max_sess_size = 0, sess_size;
1138 12 : uint16_t num_lcores = rte_lcore_count();
1139 12 : char aesni_args[32];
1140 :
1141 : /* Only the first call via module init should init the crypto drivers. */
1142 12 : if (g_session_mp != NULL) {
1143 0 : return 0;
1144 : }
1145 :
1146 : /* We always init ACCEL_DPDK_CRYPTODEV_AESNI_MB */
1147 12 : snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d",
1148 : ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP);
1149 12 : rc = rte_vdev_init(ACCEL_DPDK_CRYPTODEV_AESNI_MB, aesni_args);
1150 12 : if (rc) {
1151 1 : SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
1152 : "Possibly %s is not supported by DPDK library. "
1153 : "Keep going...\n", ACCEL_DPDK_CRYPTODEV_AESNI_MB, rc, ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1154 1 : }
1155 :
1156 : /* If we have no crypto devices, there's no reason to continue. */
1157 12 : cdev_count = rte_cryptodev_count();
1158 12 : SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count);
1159 12 : if (cdev_count == 0) {
1160 1 : return 0;
1161 : }
1162 :
1163 11 : g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
1164 11 : if (g_mbuf_offset < 0) {
1165 0 : SPDK_ERRLOG("error registering dynamic field with DPDK\n");
1166 0 : return -EINVAL;
1167 : }
1168 :
1169 : /* Create global mempools, shared by all devices regardless of type */
1170 : /* First determine the max session size; most pools are shared by all the devices,
1171 : * so we need to find the global max session size. */
1172 33 : for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1173 22 : sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
1174 22 : if (sess_size > max_sess_size) {
1175 11 : max_sess_size = sess_size;
1176 11 : }
1177 22 : }
1178 :
1179 : #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1180 : g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv",
1181 : ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1182 : NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1183 : if (g_session_mp_priv == NULL) {
1184 : SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
1185 : return -ENOMEM;
1186 : }
1187 :
1188 : /* When the session private data mempool is allocated, the element size for the session mempool
1189 : * should be 0. */
1190 : max_sess_size = 0;
1191 : #endif
1192 :
1193 11 : g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp",
1194 11 : ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1195 : SOCKET_ID_ANY);
1196 11 : if (g_session_mp == NULL) {
1197 1 : SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
1198 1 : rc = -ENOMEM;
1199 1 : goto error_create_session_mp;
1200 : }
1201 :
1202 10 : g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
1203 : ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1204 : 0, 0, SPDK_ENV_SOCKET_ID_ANY);
1205 10 : if (g_mbuf_mp == NULL) {
1206 0 : SPDK_ERRLOG("Cannot create mbuf pool\n");
1207 0 : rc = -ENOMEM;
1208 0 : goto error_create_mbuf;
1209 : }
1210 :
1211 : /* We use per-op private data, as suggested by DPDK, to store the IV and
1212 : * our own struct for queueing ops. */
1213 10 : g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
1214 : RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1215 : (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
1216 10 : ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id());
1217 10 : if (g_crypto_op_mp == NULL) {
1218 1 : SPDK_ERRLOG("Cannot create op pool\n");
1219 1 : rc = -ENOMEM;
1220 1 : goto error_create_op;
1221 : }
1222 :
1223 : /* Init all devices */
1224 17 : for (i = 0; i < cdev_count; i++) {
1225 13 : rc = accel_dpdk_cryptodev_create(i, num_lcores);
1226 13 : if (rc) {
1227 5 : goto err;
1228 : }
1229 8 : }
1230 :
1231 4 : g_shinfo.free_cb = shinfo_free_cb;
1232 :
1233 4 : spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb,
1234 : _accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel),
1235 : "accel_dpdk_cryptodev");
1236 :
1237 4 : return 0;
1238 :
1239 : /* Error cleanup paths. */
1240 : err:
1241 5 : TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) {
1242 0 : TAILQ_REMOVE(&g_crypto_devices, device, link);
1243 0 : accel_dpdk_cryptodev_release(device);
1244 0 : }
1245 5 : rte_mempool_free(g_crypto_op_mp);
1246 5 : g_crypto_op_mp = NULL;
1247 : error_create_op:
1248 6 : rte_mempool_free(g_mbuf_mp);
1249 6 : g_mbuf_mp = NULL;
1250 : error_create_mbuf:
1251 6 : rte_mempool_free(g_session_mp);
1252 6 : g_session_mp = NULL;
1253 : error_create_session_mp:
1254 7 : if (g_session_mp_priv != NULL) {
1255 0 : rte_mempool_free(g_session_mp_priv);
1256 0 : g_session_mp_priv = NULL;
1257 0 : }
1258 7 : return rc;
1259 12 : }
1260 :
1261 : static void
1262 0 : accel_dpdk_cryptodev_fini_cb(void *io_device)
1263 : {
1264 0 : struct accel_dpdk_cryptodev_device *device, *tmp;
1265 :
1266 0 : TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) {
1267 0 : TAILQ_REMOVE(&g_crypto_devices, device, link);
1268 0 : accel_dpdk_cryptodev_release(device);
1269 0 : }
1270 0 : rte_vdev_uninit(ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1271 :
1272 0 : rte_mempool_free(g_crypto_op_mp);
1273 0 : rte_mempool_free(g_mbuf_mp);
1274 0 : rte_mempool_free(g_session_mp);
1275 0 : if (g_session_mp_priv != NULL) {
1276 0 : rte_mempool_free(g_session_mp_priv);
1277 0 : }
1278 :
1279 0 : spdk_accel_module_finish();
1280 0 : }
1281 :
1282 : /* Called when the entire module is being torn down. */
1283 : static void
1284 0 : accel_dpdk_cryptodev_fini(void *ctx)
1285 : {
1286 0 : if (g_crypto_op_mp) {
1287 0 : spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb);
1288 0 : }
1289 0 : }
1290 :
1291 : static void
1292 0 : accel_dpdk_cryptodev_key_handle_session_free(struct accel_dpdk_cryptodev_device *device,
1293 : void *session)
1294 : {
1295 : #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
1296 0 : assert(device != NULL);
1297 :
1298 0 : rte_cryptodev_sym_session_free(device->cdev_id, session);
1299 : #else
1300 : rte_cryptodev_sym_session_free(session);
1301 : #endif
1302 0 : }
1303 :
1304 : static void *
1305 0 : accel_dpdk_cryptodev_key_handle_session_create(struct accel_dpdk_cryptodev_device *device,
1306 : struct rte_crypto_sym_xform *cipher_xform)
1307 : {
1308 0 : void *session;
1309 :
1310 : #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
1311 0 : session = rte_cryptodev_sym_session_create(device->cdev_id, cipher_xform, g_session_mp);
1312 : #else
1313 : session = rte_cryptodev_sym_session_create(g_session_mp);
1314 : if (!session) {
1315 : return NULL;
1316 : }
1317 :
1318 : if (rte_cryptodev_sym_session_init(device->cdev_id, session, cipher_xform, g_session_mp_priv) < 0) {
1319 : accel_dpdk_cryptodev_key_handle_session_free(device, session);
1320 : return NULL;
1321 : }
1322 : #endif
1323 :
1324 0 : return session;
1325 0 : }
1326 :
1327 : static int
1328 0 : accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key,
1329 : struct accel_dpdk_cryptodev_key_handle *key_handle)
1330 : {
1331 0 : struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1332 :
1333 0 : key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1334 0 : key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET;
1335 0 : key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
1336 :
1337 0 : switch (priv->cipher) {
1338 : case SPDK_ACCEL_CIPHER_AES_CBC:
1339 0 : key_handle->cipher_xform.cipher.key.data = key->key;
1340 0 : key_handle->cipher_xform.cipher.key.length = key->key_size;
1341 0 : key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1342 0 : break;
1343 : case SPDK_ACCEL_CIPHER_AES_XTS:
1344 0 : key_handle->cipher_xform.cipher.key.data = priv->xts_key;
1345 0 : key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size;
1346 0 : key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
1347 0 : break;
1348 : default:
1349 0 : SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher);
1350 0 : return -EINVAL;
1351 : }
1352 :
1353 0 : key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1354 0 : key_handle->session_encrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
1355 0 : &key_handle->cipher_xform);
1356 0 : if (!key_handle->session_encrypt) {
1357 0 : SPDK_ERRLOG("Failed to init encrypt session\n");
1358 0 : return -EINVAL;
1359 : }
1360 :
1361 0 : key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
1362 0 : key_handle->session_decrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
1363 0 : &key_handle->cipher_xform);
1364 0 : if (!key_handle->session_decrypt) {
1365 0 : SPDK_ERRLOG("Failed to init decrypt session:");
1366 0 : accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
1367 0 : return -EINVAL;
1368 : }
1369 :
1370 0 : return 0;
1371 0 : }
1372 :
1373 : static void
1374 0 : accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key)
1375 : {
1376 0 : struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp;
1377 0 : struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1378 :
1379 0 : TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) {
1380 0 : accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
1381 0 : accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_decrypt);
1382 0 : TAILQ_REMOVE(&priv->dev_keys, key_handle, link);
1383 0 : spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle));
1384 0 : free(key_handle);
1385 0 : }
1386 :
1387 0 : if (priv->xts_key) {
1388 0 : spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size);
1389 0 : }
1390 0 : free(priv->xts_key);
1391 0 : free(priv);
1392 0 : }
1393 :
1394 : static bool
1395 0 : accel_dpdk_cryptodev_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
1396 : {
1397 0 : switch (g_dpdk_cryptodev_driver) {
1398 : case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
1399 : case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB:
1400 0 : switch (cipher) {
1401 : case SPDK_ACCEL_CIPHER_AES_XTS:
1402 0 : return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE;
1403 : case SPDK_ACCEL_CIPHER_AES_CBC:
1404 0 : return key_size == ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH;
1405 : default:
1406 0 : return false;
1407 : }
1408 : case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI:
1409 0 : switch (cipher) {
1410 : case SPDK_ACCEL_CIPHER_AES_XTS:
1411 0 : return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
1412 : default:
1413 0 : return false;
1414 : }
1415 : default:
1416 0 : return false;
1417 : }
1418 0 : }
1419 :
1420 : static int
1421 0 : accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key)
1422 : {
1423 0 : struct accel_dpdk_cryptodev_device *device;
1424 0 : struct accel_dpdk_cryptodev_key_priv *priv;
1425 0 : struct accel_dpdk_cryptodev_key_handle *key_handle;
1426 0 : enum accel_dpdk_cryptodev_driver_type driver;
1427 0 : int rc;
1428 :
1429 0 : driver = g_dpdk_cryptodev_driver;
1430 :
1431 0 : priv = calloc(1, sizeof(*priv));
1432 0 : if (!priv) {
1433 0 : SPDK_ERRLOG("Memory allocation failed\n");
1434 0 : return -ENOMEM;
1435 : }
1436 0 : key->priv = priv;
1437 0 : priv->driver = driver;
1438 0 : priv->cipher = key->cipher;
1439 0 : TAILQ_INIT(&priv->dev_keys);
1440 :
1441 0 : if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
1442 : /* DPDK expects the keys to be concatenated together. */
1443 0 : priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char));
1444 0 : if (!priv->xts_key) {
1445 0 : SPDK_ERRLOG("Memory allocation failed\n");
1446 0 : accel_dpdk_cryptodev_key_deinit(key);
1447 0 : return -ENOMEM;
1448 : }
1449 0 : memcpy(priv->xts_key, key->key, key->key_size);
1450 0 : memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size);
1451 0 : }
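/* Example (illustrative): for AES_XTS_128, key_size == key2_size == 16, so
 * xts_key holds key1 || key2 as one 32-byte buffer; this matches the
 * cipher.key.length of key_size + key2_size set in
 * accel_dpdk_cryptodev_key_handle_configure(). */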
1452 :
1453 0 : pthread_mutex_lock(&g_device_lock);
1454 0 : TAILQ_FOREACH(device, &g_crypto_devices, link) {
1455 0 : if (device->type != driver) {
1456 0 : continue;
1457 : }
1458 0 : key_handle = calloc(1, sizeof(*key_handle));
1459 0 : if (!key_handle) {
1460 0 : pthread_mutex_unlock(&g_device_lock);
1461 0 : accel_dpdk_cryptodev_key_deinit(key);
1462 0 : return -ENOMEM;
1463 : }
1464 0 : key_handle->device = device;
1465 0 : TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link);
1466 0 : rc = accel_dpdk_cryptodev_key_handle_configure(key, key_handle);
1467 0 : if (rc) {
1468 0 : pthread_mutex_unlock(&g_device_lock);
1469 0 : accel_dpdk_cryptodev_key_deinit(key);
1470 0 : return rc;
1471 : }
1472 0 : if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
1473 : /* For MLX5_PCI we need to register the key on each device, since
1474 : * the key is bound to a specific Protection Domain; for other
1475 : * drivers one key handle is enough, so we break the loop here */
1476 0 : break;
1477 : }
1478 0 : }
1479 0 : pthread_mutex_unlock(&g_device_lock);
1480 :
1481 0 : if (TAILQ_EMPTY(&priv->dev_keys)) {
1482 0 : free(priv);
1483 0 : return -ENODEV;
1484 : }
1485 :
1486 0 : return 0;
1487 0 : }
1488 :
1489 : static void
1490 0 : accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w)
1491 : {
1492 0 : spdk_json_write_object_begin(w);
1493 0 : spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module");
1494 0 : spdk_json_write_object_end(w);
1495 :
1496 0 : spdk_json_write_object_begin(w);
1497 0 : spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver");
1498 0 : spdk_json_write_named_object_begin(w, "params");
1499 0 : spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]);
1500 0 : spdk_json_write_object_end(w);
1501 0 : spdk_json_write_object_end(w);
1502 0 : }
1503 :
1504 : static int
1505 0 : accel_dpdk_cryptodev_get_operation_info(enum spdk_accel_opcode opcode,
1506 : const struct spdk_accel_operation_exec_ctx *ctx,
1507 : struct spdk_accel_opcode_info *info)
1508 : {
1509 0 : if (!accel_dpdk_cryptodev_supports_opcode(opcode)) {
1510 0 : SPDK_ERRLOG("Received unexpected opcode: %d", opcode);
1511 0 : assert(false);
1512 : return -EINVAL;
1513 : }
1514 :
1515 0 : switch (g_dpdk_cryptodev_driver) {
1516 : case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
1517 0 : info->required_alignment = spdk_u32log2(ctx->block_size);
1518 0 : break;
1519 : default:
1520 0 : info->required_alignment = 0;
1521 0 : break;
1522 : }
1523 :
1524 0 : return 0;
1525 : }
1526 :
1527 : static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = {
1528 : .module_init = accel_dpdk_cryptodev_init,
1529 : .module_fini = accel_dpdk_cryptodev_fini,
1530 : .write_config_json = accel_dpdk_cryptodev_write_config_json,
1531 : .get_ctx_size = accel_dpdk_cryptodev_ctx_size,
1532 : .name = "dpdk_cryptodev",
1533 : .supports_opcode = accel_dpdk_cryptodev_supports_opcode,
1534 : .get_io_channel = accel_dpdk_cryptodev_get_io_channel,
1535 : .submit_tasks = accel_dpdk_cryptodev_submit_tasks,
1536 : .crypto_key_init = accel_dpdk_cryptodev_key_init,
1537 : .crypto_key_deinit = accel_dpdk_cryptodev_key_deinit,
1538 : .crypto_supports_cipher = accel_dpdk_cryptodev_supports_cipher,
1539 : .get_operation_info = accel_dpdk_cryptodev_get_operation_info,
1540 : };