/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "accel_dsa.h"

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/likely.h"
#include "spdk/thread.h"
#include "spdk/idxd.h"
#include "spdk/util.h"
#include "spdk/json.h"
#include "spdk/trace.h"
#include "spdk_internal/trace_defs.h"

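/* Cache sizes for the per-channel iobuf channel ("accel_dsa") that supplies
 * temporary metadata buffers for the DIX Verify emulation below.
 */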
#define ACCEL_DSA_MD_IOBUF_SMALL_CACHE_SIZE 128
#define ACCEL_DSA_MD_IOBUF_LARGE_CACHE_SIZE 32

static bool g_dsa_enable = false;
static bool g_kernel_mode = false;

enum channel_state {
	IDXD_CHANNEL_ACTIVE,
	IDXD_CHANNEL_ERROR,
};

static bool g_dsa_initialized = false;

struct idxd_device {
	struct spdk_idxd_device *dsa;
	TAILQ_ENTRY(idxd_device) tailq;
};
static TAILQ_HEAD(, idxd_device) g_dsa_devices = TAILQ_HEAD_INITIALIZER(g_dsa_devices);
static struct idxd_device *g_next_dev = NULL;
static uint32_t g_num_devices = 0;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;

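/* Per-task context. The embedded spdk_accel_task must be the first member so
 * SPDK_CONTAINEROF() can recover the idxd_task from the generic task. The
 * md_iov/iobuf fields are only used by the DIX Verify emulation, which needs
 * a temporary metadata buffer.
 */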
struct idxd_task {
	struct spdk_accel_task task;
	struct idxd_io_channel *chan;
	struct iovec md_iov;
	struct spdk_iobuf_entry iobuf;
};

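/* Per-thread channel state. Tasks that cannot be submitted because the device
 * is busy (-EBUSY) are parked on queued_tasks and retried from the poller.
 */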
struct idxd_io_channel {
	struct spdk_idxd_io_channel *chan;
	struct idxd_device *dev;
	enum channel_state state;
	struct spdk_poller *poller;
	uint32_t num_outstanding;
	STAILQ_HEAD(, spdk_accel_task) queued_tasks;
	struct spdk_iobuf_channel iobuf;
};

static struct spdk_io_channel *dsa_get_io_channel(void);

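/* Pick a device for a new channel: round-robin over the probed devices,
 * restricted to the caller's NUMA node, until one hands out an idxd channel.
 * Returns NULL if every NUMA-local device is already fully subscribed.
 */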
static struct idxd_device *
idxd_select_device(struct idxd_io_channel *chan)
{
	uint32_t count = 0;
	struct idxd_device *dev;
	uint32_t numa_id = spdk_env_get_numa_id(spdk_env_get_current_core());

	/*
	 * We allow channels to share underlying devices; selection is
	 * round-robin with a limit on how many channels can share one device.
	 */
	do {
		/* select next device */
		pthread_mutex_lock(&g_dev_lock);
		g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
		if (g_next_dev == NULL) {
			g_next_dev = TAILQ_FIRST(&g_dsa_devices);
		}
		dev = g_next_dev;
		pthread_mutex_unlock(&g_dev_lock);

		if (numa_id != spdk_idxd_get_socket(dev->dsa)) {
			continue;
		}

		/*
		 * Now see if a channel is available on this one. We only
		 * allow a specific number of channels to share a device
		 * to limit outstanding IO for flow control purposes.
		 */
		chan->chan = spdk_idxd_get_channel(dev->dsa);
		if (chan->chan != NULL) {
			SPDK_DEBUGLOG(accel_dsa, "On socket %d using device on numa %d\n",
				      numa_id, spdk_idxd_get_socket(dev->dsa));
			return dev;
		}
	} while (++count < g_num_devices);

	/* We are out of available channels and/or devices for the local socket. The number
	 * of channels allocated per device is fixed, and devices are only used from the
	 * same socket the current thread runs on. On a 2-socket system it may be possible
	 * to avoid this situation by spreading threads across both sockets.
	 */
	SPDK_ERRLOG("No more DSA devices available on the local socket.\n");
	return NULL;
}

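/* Completion callback shared by all hardware-submitted operations: for failed
 * DIF verify operations it re-runs the software check to recover error
 * details, then records the trace point, drops the outstanding count and
 * completes the generic accel task.
 */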
static void
dsa_done(void *cb_arg, int status)
{
	struct idxd_task *idxd_task = cb_arg;
	struct idxd_io_channel *chan;
	int rc;

	chan = idxd_task->chan;

	/* If the DSA DIF Check operation detects an error, detailed info about
	 * this error (like actual/expected values) needs to be obtained by
	 * calling the software DIF Verify operation.
	 */
	if (spdk_unlikely(status == -EIO)) {
		if (idxd_task->task.op_code == SPDK_ACCEL_OPC_DIF_VERIFY ||
		    idxd_task->task.op_code == SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
			rc = spdk_dif_verify(idxd_task->task.s.iovs, idxd_task->task.s.iovcnt,
					     idxd_task->task.dif.num_blocks,
					     idxd_task->task.dif.ctx, idxd_task->task.dif.err);
			if (rc != 0) {
				SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n",
					    idxd_task->task.dif.err->err_type,
					    idxd_task->task.dif.err->err_offset);
			}
		}
	}

	assert(chan->num_outstanding > 0);
	spdk_trace_record(TRACE_ACCEL_DSA_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
	chan->num_outstanding--;

	spdk_accel_task_complete(&idxd_task->task, status);
}

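/* Dualcast writes one source buffer to two destinations. The idxd API takes
 * plain pointers, so require single-element iovecs of identical length before
 * building the submission.
 */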
static int
idxd_submit_dualcast(struct idxd_io_channel *ch, struct idxd_task *idxd_task, int flags)
{
	struct spdk_accel_task *task = &idxd_task->task;

	if (spdk_unlikely(task->d.iovcnt != 1 || task->d2.iovcnt != 1 || task->s.iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(task->d.iovs[0].iov_len != task->s.iovs[0].iov_len ||
			  task->d.iovs[0].iov_len != task->d2.iovs[0].iov_len)) {
		return -EINVAL;
	}

	return spdk_idxd_submit_dualcast(ch->chan, task->d.iovs[0].iov_base,
					 task->d2.iovs[0].iov_base, task->s.iovs[0].iov_base,
					 task->d.iovs[0].iov_len, flags, dsa_done, idxd_task);
}

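/* Determine whether DSA's internal overlap check would spuriously fail a DIF
 * strip operation (see the detailed comment at the call site in
 * _process_single_task()). The destination segment end is deliberately
 * extended by the source segment length, mirroring how the hardware applies
 * the descriptor's transfer size to both buffers. Returns -EFAULT when the
 * software fallback must be used instead.
 */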
static int
check_dsa_dif_strip_overlap_bufs(struct spdk_accel_task *task)
{
	uint64_t src_seg_addr_end_ext;
	uint64_t dst_seg_addr_end_ext;
	size_t i;

	/* The number of source and destination iovecs must be the same.
	 * If so, one of them can be used to iterate over both vectors
	 * later in the loop. */
	if (task->d.iovcnt != task->s.iovcnt) {
		SPDK_ERRLOG("Mismatched iovcnts: src=%d, dst=%d\n",
			    task->s.iovcnt, task->d.iovcnt);
		return -EINVAL;
	}

	for (i = 0; i < task->s.iovcnt; i++) {
		src_seg_addr_end_ext = (uint64_t)task->s.iovs[i].iov_base +
				       task->s.iovs[i].iov_len;

		dst_seg_addr_end_ext = (uint64_t)task->d.iovs[i].iov_base +
				       task->s.iovs[i].iov_len;

		if ((dst_seg_addr_end_ext >= (uint64_t)task->s.iovs[i].iov_base) &&
		    (dst_seg_addr_end_ext <= src_seg_addr_end_ext)) {
			return -EFAULT;
		}
	}

	return 0;
}

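/* Message trampoline for the software fallback path: completing the task from
 * a message context keeps the completion asynchronous, as it would be for a
 * hardware-completed operation.
 */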
static void
spdk_accel_sw_task_complete(void *ctx)
{
	struct spdk_accel_task *task = (struct spdk_accel_task *)ctx;

	spdk_accel_task_complete(task, task->status);
}

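/* Second step of the DIX Verify emulation: compare the freshly generated
 * metadata with the caller-provided metadata. On mismatch, run the software
 * DIX Verify to obtain detailed error information. The temporary buffer is
 * always returned to the iobuf pool before the task completes.
 */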
static void
_accel_dsa_dix_verify_generate_cb(void *cb_arg, int status)
{
	struct idxd_task *idxd_task = cb_arg;
	struct iovec *original_mdiov = idxd_task->task.d.iovs;
	size_t mdiov_len = idxd_task->md_iov.iov_len;
	int rc;

	if (status != 0) {
		SPDK_ERRLOG("Unable to complete DIX Verify (DIX Generate failed)\n");
		goto end;
	}

	rc = memcmp(original_mdiov->iov_base, idxd_task->md_iov.iov_base, mdiov_len);
	if (rc != 0) {
		SPDK_ERRLOG("DIX Verify failed\n");
		status = -EINVAL;
		rc = spdk_dix_verify(idxd_task->task.s.iovs, idxd_task->task.s.iovcnt,
				     original_mdiov, idxd_task->task.dif.num_blocks,
				     idxd_task->task.dif.ctx, idxd_task->task.dif.err);
		if (rc != 0) {
			SPDK_ERRLOG("DIX error detected. type=%d, offset=%" PRIu32 "\n",
				    idxd_task->task.dif.err->err_type,
				    idxd_task->task.dif.err->err_offset);
		}
	}

end:
	spdk_iobuf_put(&idxd_task->chan->iobuf, idxd_task->md_iov.iov_base, mdiov_len);
	dsa_done(idxd_task, status);
}

static void
_accel_dsa_dix_verify(struct idxd_task *idxd_task)
{
	int rc;

	/* Since Intel DSA doesn't provide a separate DIX Verify operation, it is done
	 * in two steps: DIX Generate into a new buffer and a memory compare.
	 */
	rc = spdk_idxd_submit_dix_generate(idxd_task->chan->chan, idxd_task->task.s.iovs,
					   idxd_task->task.s.iovcnt, &idxd_task->md_iov,
					   idxd_task->task.dif.num_blocks, idxd_task->task.dif.ctx,
					   0, _accel_dsa_dix_verify_generate_cb, idxd_task);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to submit DIX Generate for DIX Verify\n");
		spdk_iobuf_put(&idxd_task->chan->iobuf, idxd_task->md_iov.iov_base,
			       idxd_task->md_iov.iov_len);
		dsa_done(idxd_task, rc);
	}
}

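/* Runs once the iobuf layer can satisfy a metadata buffer request that
 * spdk_iobuf_get() could not fulfill immediately.
 */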
static void
accel_dsa_dix_verify_get_iobuf_cb(struct spdk_iobuf_entry *iobuf, void *buf)
{
	struct idxd_task *idxd_task;

	idxd_task = SPDK_CONTAINEROF(iobuf, struct idxd_task, iobuf);
	idxd_task->md_iov.iov_base = buf;
	_accel_dsa_dix_verify(idxd_task);
}

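/* DIX Verify entry point: borrow a buffer sized for the generated metadata.
 * If it is available immediately, start the emulation now; otherwise
 * accel_dsa_dix_verify_get_iobuf_cb() continues when a buffer frees up.
 */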
static int
accel_dsa_dix_verify(struct idxd_io_channel *chan, int flags,
		     struct idxd_task *idxd_task)
{
	idxd_task->md_iov.iov_len = idxd_task->task.d.iovs[0].iov_len;
	idxd_task->md_iov.iov_base = spdk_iobuf_get(&chan->iobuf, idxd_task->md_iov.iov_len,
						    &idxd_task->iobuf, accel_dsa_dix_verify_get_iobuf_cb);

	if (idxd_task->md_iov.iov_base != NULL) {
		_accel_dsa_dix_verify(idxd_task);
	}

	return 0;
}

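/* Dispatch one generic accel task to the matching idxd submission. Returns
 * -EBUSY when the device queue is full (the caller re-queues the task) and
 * any other non-zero value on a hard failure.
 */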
static int
_process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	struct idxd_task *idxd_task;
	int rc = 0, flags = 0;

	idxd_task = SPDK_CONTAINEROF(task, struct idxd_task, task);
	idxd_task->chan = chan;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		rc = spdk_idxd_submit_copy(chan->chan, task->d.iovs, task->d.iovcnt,
					   task->s.iovs, task->s.iovcnt, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DUALCAST:
		rc = idxd_submit_dualcast(chan, idxd_task, flags);
		break;
	case SPDK_ACCEL_OPC_COMPARE:
		rc = spdk_idxd_submit_compare(chan->chan, task->s.iovs, task->s.iovcnt,
					      task->s2.iovs, task->s2.iovcnt, flags,
					      dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_FILL:
		rc = spdk_idxd_submit_fill(chan->chan, task->d.iovs, task->d.iovcnt,
					   task->fill_pattern, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		rc = spdk_idxd_submit_crc32c(chan->chan, task->s.iovs, task->s.iovcnt, task->seed,
					     task->crc_dst, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		rc = spdk_idxd_submit_copy_crc32c(chan->chan, task->d.iovs, task->d.iovcnt,
						  task->s.iovs, task->s.iovcnt,
						  task->seed, task->crc_dst, flags,
						  dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
		rc = spdk_idxd_submit_dif_check(chan->chan,
						task->s.iovs, task->s.iovcnt,
						task->dif.num_blocks, task->dif.ctx, flags,
						dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		rc = spdk_idxd_submit_dif_insert(chan->chan,
						 task->d.iovs, task->d.iovcnt,
						 task->s.iovs, task->s.iovcnt,
						 task->dif.num_blocks, task->dif.ctx, flags,
						 dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		/* For DIF strip operations, DSA may incorrectly report an overlapping buffer
		 * error if the destination buffer immediately precedes the source buffer.
		 * This is because DSA uses the transfer size in the descriptor for both
		 * the source and destination buffers when checking for buffer overlap.
		 * Since the transfer size applies to the source buffer, which is larger
		 * than the destination buffer by the metadata size, it should not be used
		 * as the destination buffer size. To avoid such spurious errors from DSA,
		 * the software checks whether this error condition can occur, and if so
		 * the software fallback is performed. */
		rc = check_dsa_dif_strip_overlap_bufs(task);
		if (rc == 0) {
			rc = spdk_idxd_submit_dif_strip(chan->chan,
							task->d.iovs, task->d.iovcnt,
							task->s.iovs, task->s.iovcnt,
							task->dif.num_blocks, task->dif.ctx, flags,
							dsa_done, idxd_task);
		} else if (rc == -EFAULT) {
			rc = spdk_dif_verify_copy(task->d.iovs,
						  task->d.iovcnt,
						  task->s.iovs,
						  task->s.iovcnt,
						  task->dif.num_blocks,
						  task->dif.ctx,
						  task->dif.err);
			idxd_task->task.status = rc;
			spdk_thread_send_msg(spdk_get_thread(), spdk_accel_sw_task_complete,
					     (void *)&idxd_task->task);
			rc = 0;
		}
		break;
	case SPDK_ACCEL_OPC_DIX_GENERATE:
		rc = spdk_idxd_submit_dix_generate(chan->chan, task->s.iovs, task->s.iovcnt,
						   task->d.iovs, task->dif.num_blocks,
						   task->dif.ctx, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		rc = accel_dsa_dix_verify(chan, flags, idxd_task);
		break;
	default:
		assert(false);
		rc = -EINVAL;
		break;
	}

	if (rc == 0) {
		chan->num_outstanding++;
		spdk_trace_record(TRACE_ACCEL_DSA_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
	}

	return rc;
}

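/* Module submit hook. Ordering is preserved: anything already queued forces
 * new tasks to queue behind it. -EBUSY queues the task for the poller to
 * retry; other errors complete it immediately.
 */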
static int
dsa_submit_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	int rc = 0;

	assert(STAILQ_NEXT(task, link) == NULL);

	if (spdk_unlikely(chan->state == IDXD_CHANNEL_ERROR)) {
		spdk_accel_task_complete(task, -EINVAL);
		return 0;
	}

	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
		return 0;
	}

	rc = _process_single_task(ch, task);
	if (rc == -EBUSY) {
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
	} else if (rc) {
		spdk_accel_task_complete(task, rc);
	}

	return 0;
}

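/* Drain the queued-task list in order, stopping (and leaving the remainder
 * queued) as soon as the device reports -EBUSY again. On a channel in the
 * error state, fail everything that is queued.
 */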
static int
dsa_submit_queued_tasks(struct idxd_io_channel *chan)
{
	struct spdk_accel_task *task, *tmp;
	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(chan);
	int rc = 0;

	if (spdk_unlikely(chan->state == IDXD_CHANNEL_ERROR)) {
		/* Complete queued tasks with error and clear the list */
		while ((task = STAILQ_FIRST(&chan->queued_tasks))) {
			STAILQ_REMOVE_HEAD(&chan->queued_tasks, link);
			spdk_accel_task_complete(task, -EINVAL);
		}
		return 0;
	}

	STAILQ_FOREACH_SAFE(task, &chan->queued_tasks, link, tmp) {
		rc = _process_single_task(ch, task);
		if (rc == -EBUSY) {
			return rc;
		}
		STAILQ_REMOVE_HEAD(&chan->queued_tasks, link);
		if (rc) {
			spdk_accel_task_complete(task, rc);
		}
	}

	return 0;
}

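/* Channel poller: reap hardware completions, then retry tasks that were
 * queued while the device was busy.
 */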
static int
idxd_poll(void *arg)
{
	struct idxd_io_channel *chan = arg;
	int count;

	count = spdk_idxd_process_events(chan->chan);

	/* Submit any tasks that were queued while the device was busy;
	 * dsa_submit_queued_tasks() fails them instead if the channel
	 * has entered the error state. */
	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		dsa_submit_queued_tasks(chan);
	}

	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static size_t
accel_dsa_get_ctx_size(void)
{
	return sizeof(struct idxd_task);
}

static bool
dsa_supports_opcode(enum spdk_accel_opcode opc)
{
	if (!g_dsa_initialized) {
		assert(0);
		return false;
	}

	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		return true;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
	/* In theory, DIX Generate could work without the IOMMU, but the IOMMU is
	 * required for consistency with the other DIF operations.
	 */
	case SPDK_ACCEL_OPC_DIX_GENERATE:
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		/* Supported only if the IOMMU is enabled */
		return spdk_iommu_is_enabled();
	default:
		return false;
	}
}

static int accel_dsa_init(void);
static void accel_dsa_exit(void *ctx);
static void accel_dsa_write_config_json(struct spdk_json_write_ctx *w);

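/* Module descriptor registered with the accel framework from
 * accel_dsa_enable_probe().
 */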
static struct spdk_accel_module_if g_dsa_module = {
	.module_init = accel_dsa_init,
	.module_fini = accel_dsa_exit,
	.write_config_json = accel_dsa_write_config_json,
	.get_ctx_size = accel_dsa_get_ctx_size,
	.name = "dsa",
	.supports_opcode = dsa_supports_opcode,
	.get_io_channel = dsa_get_io_channel,
	.submit_tasks = dsa_submit_task
};

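/* I/O channel constructor: bind the channel to a NUMA-local device, register
 * the completion poller and initialize the iobuf channel used for DIX Verify
 * metadata buffers.
 */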
static int
dsa_create_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;
	struct idxd_device *dsa;
	int rc;

	dsa = idxd_select_device(chan);
	if (dsa == NULL) {
		SPDK_ERRLOG("Failed to get an idxd channel\n");
		return -EINVAL;
	}

	chan->dev = dsa;
	chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
	STAILQ_INIT(&chan->queued_tasks);
	chan->num_outstanding = 0;
	chan->state = IDXD_CHANNEL_ACTIVE;
	rc = spdk_iobuf_channel_init(&chan->iobuf, "accel_dsa",
				     ACCEL_DSA_MD_IOBUF_SMALL_CACHE_SIZE,
				     ACCEL_DSA_MD_IOBUF_LARGE_CACHE_SIZE);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create an iobuf channel in accel dsa\n");
		return -ENOMEM;
	}

	return 0;
}

static void
dsa_destroy_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;

	spdk_iobuf_channel_fini(&chan->iobuf);
	spdk_poller_unregister(&chan->poller);
	spdk_idxd_put_channel(chan->chan);
}

static struct spdk_io_channel *
dsa_get_io_channel(void)
{
	return spdk_get_io_channel(&g_dsa_module);
}

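/* Called by spdk_idxd_probe() for each attached DSA device; the first device
 * found seeds the round-robin cursor used by idxd_select_device().
 */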
static void
attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
{
	struct idxd_device *dev;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return;
	}

	dev->dsa = idxd;
	if (g_next_dev == NULL) {
		g_next_dev = dev;
	}

	TAILQ_INSERT_TAIL(&g_dsa_devices, dev, tailq);
	g_num_devices++;
}

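/* Enables the module (reached via the dsa_scan_accel_module RPC): selects
 * user-space or kernel idxd mode and registers the module with the accel
 * framework.
 */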
int
accel_dsa_enable_probe(bool kernel_mode)
{
	int rc;

	if (g_dsa_enable) {
		return -EALREADY;
	}

	rc = spdk_idxd_set_config(kernel_mode);
	if (rc != 0) {
		return rc;
	}

	spdk_accel_module_list_add(&g_dsa_module);
	g_kernel_mode = kernel_mode;
	g_dsa_enable = true;

	return 0;
}

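/* PCI probe filter: only claim Intel DSA devices. */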
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
{
	if (dev->id.device_id == PCI_DEVICE_ID_INTEL_DSA) {
		return true;
	}

	return false;
}

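/* Module init: probe and attach DSA devices, register the iobuf pool name and
 * the io_device whose channels are created by dsa_create_cb().
 */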
static int
accel_dsa_init(void)
{
	int rc;

	if (!g_dsa_enable) {
		return -EINVAL;
	}

	if (spdk_idxd_probe(NULL, attach_cb, probe_cb) != 0) {
		SPDK_ERRLOG("spdk_idxd_probe() failed\n");
		return -EINVAL;
	}

	if (TAILQ_EMPTY(&g_dsa_devices)) {
		return -ENODEV;
	}

	rc = spdk_iobuf_register_module("accel_dsa");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel_dsa iobuf module\n");
		return rc;
	}

	g_dsa_initialized = true;
	spdk_io_device_register(&g_dsa_module, dsa_create_cb, dsa_destroy_cb,
				sizeof(struct idxd_io_channel), "dsa_accel_module");
	return 0;
}

static void
accel_dsa_exit(void *ctx)
{
	struct idxd_device *dev;

	if (g_dsa_initialized) {
		spdk_io_device_unregister(&g_dsa_module, NULL);
		g_dsa_initialized = false;
	}

	while (!TAILQ_EMPTY(&g_dsa_devices)) {
		dev = TAILQ_FIRST(&g_dsa_devices);
		TAILQ_REMOVE(&g_dsa_devices, dev, tailq);
		spdk_idxd_detach(dev->dsa);
		free(dev);
	}

	spdk_accel_module_finish();
}

static void
accel_dsa_write_config_json(struct spdk_json_write_ctx *w)
{
	if (g_dsa_enable) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "dsa_scan_accel_module");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_bool(w, "config_kernel_mode", g_kernel_mode);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
}

static void
dsa_trace(void)
{
	spdk_trace_register_description("DSA_OP_SUBMIT", TRACE_ACCEL_DSA_OP_SUBMIT, OWNER_TYPE_NONE,
					OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "count");
	spdk_trace_register_description("DSA_OP_COMPLETE", TRACE_ACCEL_DSA_OP_COMPLETE, OWNER_TYPE_NONE,
					OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "count");
}
SPDK_TRACE_REGISTER_FN(dsa_trace, "dsa", TRACE_GROUP_ACCEL_DSA)

SPDK_LOG_REGISTER_COMPONENT(accel_dsa)