Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2017 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/stdinc.h"
7 :
8 : #include "env_internal.h"
9 : #include "pci_dpdk.h"
10 :
11 : #include <rte_config.h>
12 : #include <rte_memory.h>
13 : #include <rte_eal_memconfig.h>
14 : #include <rte_dev.h>
15 : #include <rte_pci.h>
16 :
17 : #include "spdk_internal/assert.h"
18 :
19 : #include "spdk/assert.h"
20 : #include "spdk/likely.h"
21 : #include "spdk/queue.h"
22 : #include "spdk/util.h"
23 : #include "spdk/memory.h"
24 : #include "spdk/env_dpdk.h"
25 : #include "spdk/log.h"
26 :
27 : #ifdef __linux__
28 : #include <linux/version.h>
29 : #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
30 : #include <linux/vfio.h>
31 : #include <rte_vfio.h>
32 :
33 : struct spdk_vfio_dma_map {
34 : struct vfio_iommu_type1_dma_map map;
35 : TAILQ_ENTRY(spdk_vfio_dma_map) tailq;
36 : };
37 :
38 : struct vfio_cfg {
39 : int fd;
40 : bool enabled;
41 : bool noiommu_enabled;
42 : unsigned device_ref;
43 : TAILQ_HEAD(, spdk_vfio_dma_map) maps;
44 : pthread_mutex_t mutex;
45 : };
46 :
47 : static struct vfio_cfg g_vfio = {
48 : .fd = -1,
49 : .enabled = false,
50 : .noiommu_enabled = false,
51 : .device_ref = 0,
52 : .maps = TAILQ_HEAD_INITIALIZER(g_vfio.maps),
53 : .mutex = PTHREAD_MUTEX_INITIALIZER
54 : };
55 : #endif
56 : #endif
57 :
58 : #if DEBUG
59 : #define DEBUG_PRINT(...) SPDK_ERRLOG(__VA_ARGS__)
60 : #else
61 : #define DEBUG_PRINT(...)
62 : #endif
63 :
64 : #define FN_2MB_TO_4KB(fn) (fn << (SHIFT_2MB - SHIFT_4KB))
65 : #define FN_4KB_TO_2MB(fn) (fn >> (SHIFT_2MB - SHIFT_4KB))
66 :
67 : #define MAP_256TB_IDX(vfn_2mb) ((vfn_2mb) >> (SHIFT_1GB - SHIFT_2MB))
68 : #define MAP_1GB_IDX(vfn_2mb) ((vfn_2mb) & ((1ULL << (SHIFT_1GB - SHIFT_2MB)) - 1))
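/* Illustration (not part of the original source): assuming SHIFT_2MB == 21,
 * SHIFT_1GB == 30 and SHIFT_256TB == 48 as defined in spdk/memory.h, a virtual
 * address decomposes into the two map indexes like this:
 *
 *	uint64_t vaddr     = 0x100000400000ULL;       // example address
 *	uint64_t vfn_2mb   = vaddr >> SHIFT_2MB;      // 0x800002
 *	uint64_t idx_256tb = MAP_256TB_IDX(vfn_2mb);  // 0x4000 (bits 30..47)
 *	uint64_t idx_1gb   = MAP_1GB_IDX(vfn_2mb);    // 0x2    (bits 21..29)
 */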
69 :
70 : /* Page is registered */
71 : #define REG_MAP_REGISTERED (1ULL << 62)
72 :
73 : /* A notification region barrier. The 2MB translation entry that's marked
74 : * with this flag must be unregistered separately. This allows contiguous
75 : * regions to be unregistered in the same chunks they were registered.
76 : */
77 : #define REG_MAP_NOTIFY_START (1ULL << 63)
78 :
79 : /* Translation of a single 2MB page. */
80 : struct map_2mb {
81 : uint64_t translation_2mb;
82 : };
83 :
84 : /* Second-level map table indexed by bits [21..29] of the virtual address.
85 : * Each entry contains the address translation or error for entries that haven't
86 : * been retrieved yet.
87 : */
88 : struct map_1gb {
89 : struct map_2mb map[1ULL << (SHIFT_1GB - SHIFT_2MB)];
90 : };
91 :
92 : /* Top-level map table indexed by bits [30..47] of the virtual address.
93 : * Each entry points to a second-level map table or NULL.
94 : */
95 : struct map_256tb {
96 : struct map_1gb *map[1ULL << (SHIFT_256TB - SHIFT_1GB)];
97 : };
98 :
99 : /* Page-granularity memory address translation */
100 : struct spdk_mem_map {
101 : struct map_256tb map_256tb;
102 : pthread_mutex_t mutex;
103 : uint64_t default_translation;
104 : struct spdk_mem_map_ops ops;
105 : void *cb_ctx;
106 : TAILQ_ENTRY(spdk_mem_map) tailq;
107 : };
108 :
109 : /* Registrations map. The 64 bit translations are bit fields with the
110 : * following layout (starting with the low bits):
111 : * 0 - 61 : reserved
112 : * 62 - 63 : flags
113 : */
114 : static struct spdk_mem_map *g_mem_reg_map;
115 : static TAILQ_HEAD(spdk_mem_map_head, spdk_mem_map) g_spdk_mem_maps =
116 : TAILQ_HEAD_INITIALIZER(g_spdk_mem_maps);
117 : static pthread_mutex_t g_spdk_mem_map_mutex = PTHREAD_MUTEX_INITIALIZER;
118 :
119 : static bool g_legacy_mem;
120 : static bool g_huge_pages = true;
121 :
122 : /*
123 : * Walk the currently registered memory via the main memory registration map
124 : * and call the new map's notify callback for each virtually contiguous region.
125 : */
126 : static int
127 0 : mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_action action)
128 : {
129 : size_t idx_256tb;
130 : uint64_t idx_1gb;
131 0 : uint64_t contig_start = UINT64_MAX;
132 0 : uint64_t contig_end = UINT64_MAX;
133 : struct map_1gb *map_1gb;
134 : int rc;
135 :
136 0 : if (!g_mem_reg_map) {
137 0 : return -EINVAL;
138 : }
139 :
140 : /* Hold the memory registration map mutex so no new registrations can be added while we are looping. */
141 0 : pthread_mutex_lock(&g_mem_reg_map->mutex);
142 :
143 0 : for (idx_256tb = 0;
144 0 : idx_256tb < sizeof(g_mem_reg_map->map_256tb.map) / sizeof(g_mem_reg_map->map_256tb.map[0]);
145 0 : idx_256tb++) {
146 0 : map_1gb = g_mem_reg_map->map_256tb.map[idx_256tb];
147 :
148 0 : if (!map_1gb) {
149 0 : if (contig_start != UINT64_MAX) {
150 : /* End of a virtually contiguous range */
151 0 : rc = map->ops.notify_cb(map->cb_ctx, map, action,
152 0 : (void *)contig_start,
153 0 : contig_end - contig_start + VALUE_2MB);
154 : /* Don't bother handling unregister failures. It can't be any worse */
155 0 : if (rc != 0 && action == SPDK_MEM_MAP_NOTIFY_REGISTER) {
156 0 : goto err_unregister;
157 : }
158 0 : }
159 0 : contig_start = UINT64_MAX;
160 0 : continue;
161 : }
162 :
163 0 : for (idx_1gb = 0; idx_1gb < sizeof(map_1gb->map) / sizeof(map_1gb->map[0]); idx_1gb++) {
164 0 : if ((map_1gb->map[idx_1gb].translation_2mb & REG_MAP_REGISTERED) &&
165 0 : (contig_start == UINT64_MAX ||
166 0 : (map_1gb->map[idx_1gb].translation_2mb & REG_MAP_NOTIFY_START) == 0)) {
167 : /* Rebuild the virtual address from the indexes */
168 0 : uint64_t vaddr = (idx_256tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
169 :
170 0 : if (contig_start == UINT64_MAX) {
171 0 : contig_start = vaddr;
172 0 : }
173 :
174 0 : contig_end = vaddr;
175 0 : } else {
176 0 : if (contig_start != UINT64_MAX) {
177 : /* End of a virtually contiguous range */
178 0 : rc = map->ops.notify_cb(map->cb_ctx, map, action,
179 0 : (void *)contig_start,
180 0 : contig_end - contig_start + VALUE_2MB);
181 : /* Don't bother handling unregister failures. It can't be any worse */
182 0 : if (rc != 0 && action == SPDK_MEM_MAP_NOTIFY_REGISTER) {
183 0 : goto err_unregister;
184 : }
185 :
186 : /* This page might be part of a neighbouring region, so process
187 : * it again. The idx_1gb will be incremented immediately.
188 : */
189 0 : idx_1gb--;
190 0 : }
191 0 : contig_start = UINT64_MAX;
192 : }
193 0 : }
194 0 : }
195 :
196 0 : pthread_mutex_unlock(&g_mem_reg_map->mutex);
197 0 : return 0;
198 :
199 : err_unregister:
200 : /* Unwind to the first empty translation so we don't unregister
201 : * a region that just failed to register.
202 : */
203 0 : idx_256tb = MAP_256TB_IDX((contig_start >> SHIFT_2MB) - 1);
204 0 : idx_1gb = MAP_1GB_IDX((contig_start >> SHIFT_2MB) - 1);
205 0 : contig_start = UINT64_MAX;
206 0 : contig_end = UINT64_MAX;
207 :
208 : /* Unregister any memory we managed to register before the failure */
209 0 : for (; idx_256tb < SIZE_MAX; idx_256tb--) {
210 0 : map_1gb = g_mem_reg_map->map_256tb.map[idx_256tb];
211 :
212 0 : if (!map_1gb) {
213 0 : if (contig_end != UINT64_MAX) {
214 : /* End of a virtually contiguous range */
215 0 : map->ops.notify_cb(map->cb_ctx, map,
216 : SPDK_MEM_MAP_NOTIFY_UNREGISTER,
217 0 : (void *)contig_start,
218 0 : contig_end - contig_start + VALUE_2MB);
219 0 : }
220 0 : contig_end = UINT64_MAX;
221 0 : continue;
222 : }
223 :
224 0 : for (; idx_1gb < UINT64_MAX; idx_1gb--) {
225 : /* Rebuild the virtual address from the indexes */
226 0 : uint64_t vaddr = (idx_256tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
227 0 : if ((map_1gb->map[idx_1gb].translation_2mb & REG_MAP_REGISTERED) &&
228 0 : (contig_end == UINT64_MAX || (map_1gb->map[idx_1gb].translation_2mb & REG_MAP_NOTIFY_START) == 0)) {
229 :
230 0 : if (contig_end == UINT64_MAX) {
231 0 : contig_end = vaddr;
232 0 : }
233 0 : contig_start = vaddr;
234 0 : } else {
235 0 : if (contig_end != UINT64_MAX) {
236 0 : if (map_1gb->map[idx_1gb].translation_2mb & REG_MAP_NOTIFY_START) {
237 0 : contig_start = vaddr;
238 0 : }
239 : /* End of a virtually contiguous range */
240 0 : map->ops.notify_cb(map->cb_ctx, map,
241 : SPDK_MEM_MAP_NOTIFY_UNREGISTER,
242 0 : (void *)contig_start,
243 0 : contig_end - contig_start + VALUE_2MB);
244 0 : }
245 0 : contig_end = UINT64_MAX;
246 : }
247 0 : }
248 0 : idx_1gb = sizeof(map_1gb->map) / sizeof(map_1gb->map[0]) - 1;
249 0 : }
250 :
251 0 : pthread_mutex_unlock(&g_mem_reg_map->mutex);
252 0 : return rc;
253 0 : }
254 :
255 : struct spdk_mem_map *
256 0 : spdk_mem_map_alloc(uint64_t default_translation, const struct spdk_mem_map_ops *ops, void *cb_ctx)
257 : {
258 : struct spdk_mem_map *map;
259 : int rc;
260 : size_t i;
261 :
262 0 : map = calloc(1, sizeof(*map));
263 0 : if (map == NULL) {
264 0 : return NULL;
265 : }
266 :
267 0 : if (pthread_mutex_init(&map->mutex, NULL)) {
268 0 : free(map);
269 0 : return NULL;
270 : }
271 :
272 0 : map->default_translation = default_translation;
273 0 : map->cb_ctx = cb_ctx;
274 0 : if (ops) {
275 0 : map->ops = *ops;
276 0 : }
277 :
278 0 : if (ops && ops->notify_cb) {
279 0 : pthread_mutex_lock(&g_spdk_mem_map_mutex);
280 0 : rc = mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_REGISTER);
281 0 : if (rc != 0) {
282 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
283 0 : DEBUG_PRINT("Initial mem_map notify failed\n");
284 0 : pthread_mutex_destroy(&map->mutex);
285 0 : for (i = 0; i < sizeof(map->map_256tb.map) / sizeof(map->map_256tb.map[0]); i++) {
286 0 : free(map->map_256tb.map[i]);
287 0 : }
288 0 : free(map);
289 0 : return NULL;
290 : }
291 0 : TAILQ_INSERT_TAIL(&g_spdk_mem_maps, map, tailq);
292 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
293 0 : }
294 :
295 0 : return map;
296 0 : }
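/* Usage sketch (illustrative only, mirroring the pattern used by vtophys_init()
 * later in this file): a consumer provides a default translation and optional
 * callbacks; my_notify is a hypothetical callback name.
 *
 *	static const struct spdk_mem_map_ops my_ops = {
 *		.notify_cb = my_notify,
 *		.are_contiguous = NULL,
 *	};
 *	struct spdk_mem_map *m = spdk_mem_map_alloc(SPDK_VTOPHYS_ERROR, &my_ops, NULL);
 *	...
 *	spdk_mem_map_free(&m);
 *
 * When notify_cb is set, all currently registered memory is walked and reported
 * to the new map before spdk_mem_map_alloc() returns.
 */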
297 :
298 : void
299 0 : spdk_mem_map_free(struct spdk_mem_map **pmap)
300 : {
301 : struct spdk_mem_map *map;
302 : size_t i;
303 :
304 0 : if (!pmap) {
305 0 : return;
306 : }
307 :
308 0 : map = *pmap;
309 :
310 0 : if (!map) {
311 0 : return;
312 : }
313 :
314 0 : if (map->ops.notify_cb) {
315 0 : pthread_mutex_lock(&g_spdk_mem_map_mutex);
316 0 : mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_UNREGISTER);
317 0 : TAILQ_REMOVE(&g_spdk_mem_maps, map, tailq);
318 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
319 0 : }
320 :
321 0 : for (i = 0; i < sizeof(map->map_256tb.map) / sizeof(map->map_256tb.map[0]); i++) {
322 0 : free(map->map_256tb.map[i]);
323 0 : }
324 :
325 0 : pthread_mutex_destroy(&map->mutex);
326 :
327 0 : free(map);
328 0 : *pmap = NULL;
329 0 : }
330 :
331 : int
332 0 : spdk_mem_register(void *_vaddr, size_t len)
333 : {
334 : struct spdk_mem_map *map;
335 : int rc;
336 0 : uint64_t vaddr = (uintptr_t)_vaddr;
337 : uint64_t seg_vaddr;
338 : size_t seg_len;
339 : uint64_t reg;
340 :
341 0 : if ((uintptr_t)vaddr & ~MASK_256TB) {
342 0 : DEBUG_PRINT("invalid usermode virtual address %jx\n", vaddr);
343 0 : return -EINVAL;
344 : }
345 :
346 0 : if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
347 0 : DEBUG_PRINT("invalid %s parameters, vaddr=%jx len=%ju\n",
348 : __func__, vaddr, len);
349 0 : return -EINVAL;
350 : }
351 :
352 0 : if (len == 0) {
353 0 : return 0;
354 : }
355 :
356 0 : pthread_mutex_lock(&g_spdk_mem_map_mutex);
357 :
358 0 : seg_vaddr = vaddr;
359 0 : seg_len = len;
360 0 : while (seg_len > 0) {
361 0 : reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
362 0 : if (reg & REG_MAP_REGISTERED) {
363 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
364 0 : return -EBUSY;
365 : }
366 0 : seg_vaddr += VALUE_2MB;
367 0 : seg_len -= VALUE_2MB;
368 : }
369 :
370 0 : seg_vaddr = vaddr;
371 0 : seg_len = 0;
372 0 : while (len > 0) {
373 0 : spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, VALUE_2MB,
374 0 : seg_len == 0 ? REG_MAP_REGISTERED | REG_MAP_NOTIFY_START : REG_MAP_REGISTERED);
375 0 : seg_len += VALUE_2MB;
376 0 : vaddr += VALUE_2MB;
377 0 : len -= VALUE_2MB;
378 : }
379 :
380 0 : TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
381 0 : rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER,
382 0 : (void *)seg_vaddr, seg_len);
383 0 : if (rc != 0) {
384 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
385 0 : return rc;
386 : }
387 0 : }
388 :
389 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
390 0 : return 0;
391 0 : }
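/* Illustration (not part of the original source): spdk_mem_register() marks only
 * the first 2MB entry of each call with REG_MAP_NOTIFY_START, so memory must be
 * unregistered in the same chunks it was registered in. For example:
 *
 *	spdk_mem_register(buf, 6 * VALUE_2MB);     // buf is a hypothetical 2MB-aligned buffer
 *	spdk_mem_unregister(buf, 2 * VALUE_2MB);   // fails with -ERANGE (partial region)
 *	spdk_mem_unregister(buf, 6 * VALUE_2MB);   // succeeds, matches the registration
 */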
392 :
393 : int
394 0 : spdk_mem_unregister(void *_vaddr, size_t len)
395 : {
396 : struct spdk_mem_map *map;
397 : int rc;
398 0 : uint64_t vaddr = (uintptr_t)_vaddr;
399 : uint64_t seg_vaddr;
400 : size_t seg_len;
401 : uint64_t reg, newreg;
402 :
403 0 : if ((uintptr_t)vaddr & ~MASK_256TB) {
404 0 : DEBUG_PRINT("invalid usermode virtual address %jx\n", vaddr);
405 0 : return -EINVAL;
406 : }
407 :
408 0 : if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
409 0 : DEBUG_PRINT("invalid %s parameters, vaddr=%jx len=%ju\n",
410 : __func__, vaddr, len);
411 0 : return -EINVAL;
412 : }
413 :
414 0 : pthread_mutex_lock(&g_spdk_mem_map_mutex);
415 :
416 : /* The first page must be the start of a region. Also check if it's
417 : * registered to make sure we don't return -ERANGE for non-registered
418 : * regions.
419 : */
420 0 : reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)vaddr, NULL);
421 0 : if ((reg & REG_MAP_REGISTERED) && (reg & REG_MAP_NOTIFY_START) == 0) {
422 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
423 0 : return -ERANGE;
424 : }
425 :
426 0 : seg_vaddr = vaddr;
427 0 : seg_len = len;
428 0 : while (seg_len > 0) {
429 0 : reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
430 0 : if ((reg & REG_MAP_REGISTERED) == 0) {
431 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
432 0 : return -EINVAL;
433 : }
434 0 : seg_vaddr += VALUE_2MB;
435 0 : seg_len -= VALUE_2MB;
436 : }
437 :
438 0 : newreg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
439 : /* If the next page is registered, it must be the start of a region as well,
440 : * otherwise we'd be unregistering only a part of a region.
441 : */
442 0 : if ((newreg & REG_MAP_NOTIFY_START) == 0 && (newreg & REG_MAP_REGISTERED)) {
443 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
444 0 : return -ERANGE;
445 : }
446 0 : seg_vaddr = vaddr;
447 0 : seg_len = 0;
448 :
449 0 : while (len > 0) {
450 0 : reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)vaddr, NULL);
451 0 : spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, VALUE_2MB, 0);
452 :
453 0 : if (seg_len > 0 && (reg & REG_MAP_NOTIFY_START)) {
454 0 : TAILQ_FOREACH_REVERSE(map, &g_spdk_mem_maps, spdk_mem_map_head, tailq) {
455 0 : rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER,
456 0 : (void *)seg_vaddr, seg_len);
457 0 : if (rc != 0) {
458 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
459 0 : return rc;
460 : }
461 0 : }
462 :
463 0 : seg_vaddr = vaddr;
464 0 : seg_len = VALUE_2MB;
465 0 : } else {
466 0 : seg_len += VALUE_2MB;
467 : }
468 :
469 0 : vaddr += VALUE_2MB;
470 0 : len -= VALUE_2MB;
471 : }
472 :
473 0 : if (seg_len > 0) {
474 0 : TAILQ_FOREACH_REVERSE(map, &g_spdk_mem_maps, spdk_mem_map_head, tailq) {
475 0 : rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER,
476 0 : (void *)seg_vaddr, seg_len);
477 0 : if (rc != 0) {
478 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
479 0 : return rc;
480 : }
481 0 : }
482 0 : }
483 :
484 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
485 0 : return 0;
486 0 : }
487 :
488 : int
489 0 : spdk_mem_reserve(void *vaddr, size_t len)
490 : {
491 : struct spdk_mem_map *map;
492 : void *seg_vaddr;
493 : size_t seg_len;
494 : uint64_t reg;
495 :
496 0 : if ((uintptr_t)vaddr & ~MASK_256TB) {
497 0 : DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
498 0 : return -EINVAL;
499 : }
500 :
501 0 : if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
502 0 : DEBUG_PRINT("invalid %s parameters, vaddr=%p len=%ju\n",
503 : __func__, vaddr, len);
504 0 : return -EINVAL;
505 : }
506 :
507 0 : if (len == 0) {
508 0 : return 0;
509 : }
510 :
511 0 : pthread_mutex_lock(&g_spdk_mem_map_mutex);
512 :
513 : /* Check if any part of this range is already registered */
514 0 : seg_vaddr = vaddr;
515 0 : seg_len = len;
516 0 : while (seg_len > 0) {
517 0 : reg = spdk_mem_map_translate(g_mem_reg_map, (uint64_t)seg_vaddr, NULL);
518 0 : if (reg & REG_MAP_REGISTERED) {
519 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
520 0 : return -EBUSY;
521 : }
522 0 : seg_vaddr += VALUE_2MB;
523 0 : seg_len -= VALUE_2MB;
524 : }
525 :
526 : /* Simply set the translation to the memory map's default. This allocates the space in the
527 : * map but does not provide a valid translation. */
528 0 : spdk_mem_map_set_translation(g_mem_reg_map, (uint64_t)vaddr, len,
529 0 : g_mem_reg_map->default_translation);
530 :
531 0 : TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
532 0 : spdk_mem_map_set_translation(map, (uint64_t)vaddr, len, map->default_translation);
533 0 : }
534 :
535 0 : pthread_mutex_unlock(&g_spdk_mem_map_mutex);
536 0 : return 0;
537 0 : }
538 :
539 : static struct map_1gb *
540 0 : mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
541 : {
542 : struct map_1gb *map_1gb;
543 0 : uint64_t idx_256tb = MAP_256TB_IDX(vfn_2mb);
544 : size_t i;
545 :
546 0 : if (spdk_unlikely(idx_256tb >= SPDK_COUNTOF(map->map_256tb.map))) {
547 0 : return NULL;
548 : }
549 :
550 0 : map_1gb = map->map_256tb.map[idx_256tb];
551 :
552 0 : if (!map_1gb) {
553 0 : pthread_mutex_lock(&map->mutex);
554 :
555 : /* Recheck to make sure nobody else got the mutex first. */
556 0 : map_1gb = map->map_256tb.map[idx_256tb];
557 0 : if (!map_1gb) {
558 0 : map_1gb = malloc(sizeof(struct map_1gb));
559 0 : if (map_1gb) {
560 : /* initialize all entries to default translation */
561 0 : for (i = 0; i < SPDK_COUNTOF(map_1gb->map); i++) {
562 0 : map_1gb->map[i].translation_2mb = map->default_translation;
563 0 : }
564 0 : map->map_256tb.map[idx_256tb] = map_1gb;
565 0 : }
566 0 : }
567 :
568 0 : pthread_mutex_unlock(&map->mutex);
569 :
570 0 : if (!map_1gb) {
571 0 : DEBUG_PRINT("allocation failed\n");
572 0 : return NULL;
573 : }
574 0 : }
575 :
576 0 : return map_1gb;
577 0 : }
578 :
579 : int
580 0 : spdk_mem_map_set_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t size,
581 : uint64_t translation)
582 : {
583 : uint64_t vfn_2mb;
584 : struct map_1gb *map_1gb;
585 : uint64_t idx_1gb;
586 : struct map_2mb *map_2mb;
587 :
588 0 : if ((uintptr_t)vaddr & ~MASK_256TB) {
589 0 : DEBUG_PRINT("invalid usermode virtual address %" PRIu64 "\n", vaddr);
590 0 : return -EINVAL;
591 : }
592 :
593 : /* For now, only 2 MB-aligned registrations are supported */
594 0 : if (((uintptr_t)vaddr & MASK_2MB) || (size & MASK_2MB)) {
595 0 : DEBUG_PRINT("invalid %s parameters, vaddr=%" PRIu64 " len=%" PRIu64 "\n",
596 : __func__, vaddr, size);
597 0 : return -EINVAL;
598 : }
599 :
600 0 : vfn_2mb = vaddr >> SHIFT_2MB;
601 :
602 0 : while (size) {
603 0 : map_1gb = mem_map_get_map_1gb(map, vfn_2mb);
604 0 : if (!map_1gb) {
605 0 : DEBUG_PRINT("could not get %p map\n", (void *)vaddr);
606 0 : return -ENOMEM;
607 : }
608 :
609 0 : idx_1gb = MAP_1GB_IDX(vfn_2mb);
610 0 : map_2mb = &map_1gb->map[idx_1gb];
611 0 : map_2mb->translation_2mb = translation;
612 :
613 0 : size -= VALUE_2MB;
614 0 : vfn_2mb++;
615 : }
616 :
617 0 : return 0;
618 0 : }
619 :
620 : int
621 0 : spdk_mem_map_clear_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t size)
622 : {
623 0 : return spdk_mem_map_set_translation(map, vaddr, size, map->default_translation);
624 : }
625 :
626 : inline uint64_t
627 0 : spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
628 : {
629 : const struct map_1gb *map_1gb;
630 : const struct map_2mb *map_2mb;
631 : uint64_t idx_256tb;
632 : uint64_t idx_1gb;
633 : uint64_t vfn_2mb;
634 : uint64_t cur_size;
635 : uint64_t prev_translation;
636 : uint64_t orig_translation;
637 :
638 0 : if (spdk_unlikely(vaddr & ~MASK_256TB)) {
639 0 : DEBUG_PRINT("invalid usermode virtual address %p\n", (void *)vaddr);
640 0 : return map->default_translation;
641 : }
642 :
643 0 : vfn_2mb = vaddr >> SHIFT_2MB;
644 0 : idx_256tb = MAP_256TB_IDX(vfn_2mb);
645 0 : idx_1gb = MAP_1GB_IDX(vfn_2mb);
646 :
647 0 : map_1gb = map->map_256tb.map[idx_256tb];
648 0 : if (spdk_unlikely(!map_1gb)) {
649 0 : return map->default_translation;
650 : }
651 :
652 0 : cur_size = VALUE_2MB - _2MB_OFFSET(vaddr);
653 0 : map_2mb = &map_1gb->map[idx_1gb];
654 0 : if (size == NULL || map->ops.are_contiguous == NULL ||
655 0 : map_2mb->translation_2mb == map->default_translation) {
656 0 : if (size != NULL) {
657 0 : *size = spdk_min(*size, cur_size);
658 0 : }
659 0 : return map_2mb->translation_2mb;
660 : }
661 :
662 0 : orig_translation = map_2mb->translation_2mb;
663 0 : prev_translation = orig_translation;
664 0 : while (cur_size < *size) {
665 0 : vfn_2mb++;
666 0 : idx_256tb = MAP_256TB_IDX(vfn_2mb);
667 0 : idx_1gb = MAP_1GB_IDX(vfn_2mb);
668 :
669 0 : map_1gb = map->map_256tb.map[idx_256tb];
670 0 : if (spdk_unlikely(!map_1gb)) {
671 0 : break;
672 : }
673 :
674 0 : map_2mb = &map_1gb->map[idx_1gb];
675 0 : if (!map->ops.are_contiguous(prev_translation, map_2mb->translation_2mb)) {
676 0 : break;
677 : }
678 :
679 0 : cur_size += VALUE_2MB;
680 0 : prev_translation = map_2mb->translation_2mb;
681 : }
682 :
683 0 : *size = spdk_min(*size, cur_size);
684 0 : return orig_translation;
685 0 : }
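/* Illustration (not part of the original source): when a size pointer is passed,
 * spdk_mem_map_translate() shrinks it to the largest run starting at vaddr that the
 * map's are_contiguous() callback reports as a single region, e.g.
 *
 *	uint64_t len = 4 * VALUE_2MB;
 *	uint64_t translation = spdk_mem_map_translate(map, vaddr, &len);
 *	// translation covers the first 2MB page; len may now be smaller than
 *	// requested if the following pages do not translate contiguously.
 */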
686 :
687 : static void
688 0 : memory_hotplug_cb(enum rte_mem_event event_type,
689 : const void *addr, size_t len, void *arg)
690 : {
691 0 : if (event_type == RTE_MEM_EVENT_ALLOC) {
692 0 : spdk_mem_register((void *)addr, len);
693 :
694 0 : if (!spdk_env_dpdk_external_init()) {
695 0 : return;
696 : }
697 :
698 : /* When the user initialized DPDK separately, we can't
699 : * be sure that the --match-allocations RTE flag was specified.
700 : * Without this flag, DPDK can free memory in different units
701 : * than it was allocated in, which doesn't work with things like RDMA MRs.
702 : *
703 : * For such cases, we mark segments so they aren't freed.
704 : */
705 0 : while (len > 0) {
706 : struct rte_memseg *seg;
707 :
708 0 : seg = rte_mem_virt2memseg(addr, NULL);
709 0 : assert(seg != NULL);
710 0 : seg->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
711 0 : addr = (void *)((uintptr_t)addr + seg->hugepage_sz);
712 0 : len -= seg->hugepage_sz;
713 : }
714 0 : } else if (event_type == RTE_MEM_EVENT_FREE) {
715 0 : spdk_mem_unregister((void *)addr, len);
716 0 : }
717 0 : }
718 :
719 : static int
720 0 : memory_iter_cb(const struct rte_memseg_list *msl,
721 : const struct rte_memseg *ms, size_t len, void *arg)
722 : {
723 0 : return spdk_mem_register(ms->addr, len);
724 : }
725 :
726 : static bool g_mem_event_cb_registered = false;
727 :
728 : static int
729 0 : mem_map_mem_event_callback_register(void)
730 : {
731 : int rc;
732 :
733 0 : rc = rte_mem_event_callback_register("spdk", memory_hotplug_cb, NULL);
734 0 : if (rc != 0) {
735 0 : DEBUG_PRINT("memory event callback registration failed, rc = %d\n", rc);
736 0 : return -errno;
737 : }
738 :
739 0 : g_mem_event_cb_registered = true;
740 0 : return 0;
741 0 : }
742 :
743 : static void
744 0 : mem_map_mem_event_callback_unregister(void)
745 : {
746 0 : if (g_mem_event_cb_registered) {
747 0 : g_mem_event_cb_registered = false;
748 0 : rte_mem_event_callback_unregister("spdk", NULL);
749 0 : }
750 0 : }
751 :
752 : int
753 0 : mem_map_init(bool legacy_mem)
754 : {
755 : int rc;
756 :
757 0 : g_legacy_mem = legacy_mem;
758 :
759 0 : g_mem_reg_map = spdk_mem_map_alloc(0, NULL, NULL);
760 0 : if (g_mem_reg_map == NULL) {
761 0 : DEBUG_PRINT("memory registration map allocation failed\n");
762 0 : return -ENOMEM;
763 : }
764 :
765 0 : if (!g_huge_pages) {
766 0 : return 0;
767 : }
768 :
769 0 : if (!g_legacy_mem) {
770 : /**
771 : * To prevent DPDK from complaining, only register the callback when
772 : * we are not in legacy mem mode.
773 : */
774 0 : rc = mem_map_mem_event_callback_register();
775 0 : if (rc != 0) {
776 0 : DEBUG_PRINT("memory event callback registration failed, rc = %d\n", rc);
777 0 : goto err_free_reg_map;
778 : }
779 0 : }
780 :
781 : /*
782 : * Walk all DPDK memory segments and register them
783 : * with the main memory map
784 : */
785 0 : rc = rte_memseg_contig_walk(memory_iter_cb, NULL);
786 0 : if (rc != 0) {
787 0 : DEBUG_PRINT("memory segments walking failed, rc = %d\n", rc);
788 0 : goto err_unregister_mem_cb;
789 : }
790 :
791 0 : return 0;
792 :
793 : err_unregister_mem_cb:
794 0 : mem_map_mem_event_callback_unregister();
795 : err_free_reg_map:
796 0 : spdk_mem_map_free(&g_mem_reg_map);
797 0 : return rc;
798 0 : }
799 :
800 : void
801 0 : mem_map_fini(void)
802 : {
803 0 : mem_map_mem_event_callback_unregister();
804 0 : spdk_mem_map_free(&g_mem_reg_map);
805 0 : }
806 :
807 : bool
808 0 : spdk_iommu_is_enabled(void)
809 : {
810 : #if VFIO_ENABLED
811 : return g_vfio.enabled && !g_vfio.noiommu_enabled;
812 : #else
813 0 : return false;
814 : #endif
815 : }
816 :
817 : struct spdk_vtophys_pci_device {
818 : struct rte_pci_device *pci_device;
819 : TAILQ_ENTRY(spdk_vtophys_pci_device) tailq;
820 : };
821 :
822 : static pthread_mutex_t g_vtophys_pci_devices_mutex = PTHREAD_MUTEX_INITIALIZER;
823 : static TAILQ_HEAD(, spdk_vtophys_pci_device) g_vtophys_pci_devices =
824 : TAILQ_HEAD_INITIALIZER(g_vtophys_pci_devices);
825 :
826 : static struct spdk_mem_map *g_vtophys_map;
827 : static struct spdk_mem_map *g_phys_ref_map;
828 : static struct spdk_mem_map *g_numa_map;
829 :
830 : #if VFIO_ENABLED
831 : static int
832 : _vfio_iommu_map_dma(uint64_t vaddr, uint64_t iova, uint64_t size)
833 : {
834 : struct spdk_vfio_dma_map *dma_map;
835 : int ret;
836 :
837 : dma_map = calloc(1, sizeof(*dma_map));
838 : if (dma_map == NULL) {
839 : return -ENOMEM;
840 : }
841 :
842 : dma_map->map.argsz = sizeof(dma_map->map);
843 : dma_map->map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
844 : dma_map->map.vaddr = vaddr;
845 : dma_map->map.iova = iova;
846 : dma_map->map.size = size;
847 :
848 : if (g_vfio.device_ref == 0) {
849 : /* VFIO requires at least one device (IOMMU group) to be added to
850 : * a VFIO container before it is possible to perform any IOMMU
851 : * operations on that container. This memory will be mapped once
852 : * the first device (IOMMU group) is hotplugged.
853 : *
854 : * Since the vfio container is managed internally by DPDK, it is
855 : * also possible that some device is already in that container, but
856 : * it's not managed by SPDK - e.g. a NIC attached internally
857 : * inside DPDK. We could map the memory straight away in such a
858 : * scenario, but there's no need to do it. DPDK devices clearly
859 : * don't need our mappings and hence we defer the mapping
860 : * unconditionally until the first SPDK-managed device is
861 : * hotplugged.
862 : */
863 : goto out_insert;
864 : }
865 :
866 : ret = ioctl(g_vfio.fd, VFIO_IOMMU_MAP_DMA, &dma_map->map);
867 : if (ret) {
868 : /* There are cases where the vfio container doesn't have an IOMMU group; it's safe to ignore this case */
869 : SPDK_NOTICELOG("Cannot set up DMA mapping, error %d, ignored\n", errno);
870 : }
871 :
872 : out_insert:
873 : TAILQ_INSERT_TAIL(&g_vfio.maps, dma_map, tailq);
874 : return 0;
875 : }
876 :
877 :
878 : static int
879 : vtophys_iommu_map_dma(uint64_t vaddr, uint64_t iova, uint64_t size)
880 : {
881 : uint64_t refcount;
882 : int ret;
883 :
884 : refcount = spdk_mem_map_translate(g_phys_ref_map, iova, NULL);
885 : assert(refcount < UINT64_MAX);
886 : if (refcount > 0) {
887 : spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount + 1);
888 : return 0;
889 : }
890 :
891 : pthread_mutex_lock(&g_vfio.mutex);
892 : ret = _vfio_iommu_map_dma(vaddr, iova, size);
893 : pthread_mutex_unlock(&g_vfio.mutex);
894 : if (ret) {
895 : return ret;
896 : }
897 :
898 : spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount + 1);
899 : return 0;
900 : }
901 :
902 : int
903 : vtophys_iommu_map_dma_bar(uint64_t vaddr, uint64_t iova, uint64_t size)
904 : {
905 : int ret;
906 :
907 : pthread_mutex_lock(&g_vfio.mutex);
908 : ret = _vfio_iommu_map_dma(vaddr, iova, size);
909 : pthread_mutex_unlock(&g_vfio.mutex);
910 :
911 : return ret;
912 : }
913 :
914 : static int
915 : _vfio_iommu_unmap_dma(struct spdk_vfio_dma_map *dma_map)
916 : {
917 : struct vfio_iommu_type1_dma_unmap unmap = {};
918 : int ret;
919 :
920 : if (g_vfio.device_ref == 0) {
921 : /* Memory is not mapped anymore, just remove its references */
922 : goto out_remove;
923 : }
924 :
925 : unmap.argsz = sizeof(unmap);
926 : unmap.flags = 0;
927 : unmap.iova = dma_map->map.iova;
928 : unmap.size = dma_map->map.size;
929 : ret = ioctl(g_vfio.fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
930 : if (ret) {
931 : SPDK_NOTICELOG("Cannot clear DMA mapping, error %d, ignored\n", errno);
932 : }
933 :
934 : out_remove:
935 : TAILQ_REMOVE(&g_vfio.maps, dma_map, tailq);
936 : free(dma_map);
937 : return 0;
938 : }
939 :
940 : static int
941 : vtophys_iommu_unmap_dma(uint64_t iova, uint64_t size)
942 : {
943 : struct spdk_vfio_dma_map *dma_map;
944 : uint64_t refcount;
945 : int ret;
946 :
947 : pthread_mutex_lock(&g_vfio.mutex);
948 : TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
949 : if (dma_map->map.iova == iova) {
950 : break;
951 : }
952 : }
953 :
954 : if (dma_map == NULL) {
955 : DEBUG_PRINT("Cannot clear DMA mapping for IOVA %"PRIx64" - it's not mapped\n", iova);
956 : pthread_mutex_unlock(&g_vfio.mutex);
957 : return -ENXIO;
958 : }
959 :
960 : refcount = spdk_mem_map_translate(g_phys_ref_map, iova, NULL);
961 : assert(refcount < UINT64_MAX);
962 : if (refcount > 0) {
963 : spdk_mem_map_set_translation(g_phys_ref_map, iova, size, refcount - 1);
964 : }
965 :
966 : /* We still have outstanding references, don't clear it. */
967 : if (refcount > 1) {
968 : pthread_mutex_unlock(&g_vfio.mutex);
969 : return 0;
970 : }
971 :
972 : /** don't support partial or multiple-page unmap for now */
973 : assert(dma_map->map.size == size);
974 :
975 : ret = _vfio_iommu_unmap_dma(dma_map);
976 : pthread_mutex_unlock(&g_vfio.mutex);
977 :
978 : return ret;
979 : }
980 :
981 : int
982 : vtophys_iommu_unmap_dma_bar(uint64_t vaddr)
983 : {
984 : struct spdk_vfio_dma_map *dma_map;
985 : int ret;
986 :
987 : pthread_mutex_lock(&g_vfio.mutex);
988 : TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
989 : if (dma_map->map.vaddr == vaddr) {
990 : break;
991 : }
992 : }
993 :
994 : if (dma_map == NULL) {
995 : DEBUG_PRINT("Cannot clear DMA mapping for address %"PRIx64" - it's not mapped\n", vaddr);
996 : pthread_mutex_unlock(&g_vfio.mutex);
997 : return -ENXIO;
998 : }
999 :
1000 : ret = _vfio_iommu_unmap_dma(dma_map);
1001 : pthread_mutex_unlock(&g_vfio.mutex);
1002 : return ret;
1003 : }
1004 : #endif
1005 :
1006 : static uint64_t
1007 0 : vtophys_get_paddr_memseg(uint64_t vaddr)
1008 : {
1009 : uintptr_t paddr;
1010 : struct rte_memseg *seg;
1011 :
1012 0 : seg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
1013 0 : if (seg != NULL) {
1014 0 : paddr = seg->iova;
1015 0 : if (paddr == RTE_BAD_IOVA) {
1016 0 : return SPDK_VTOPHYS_ERROR;
1017 : }
1018 0 : paddr += (vaddr - (uintptr_t)seg->addr);
1019 0 : return paddr;
1020 : }
1021 :
1022 0 : return SPDK_VTOPHYS_ERROR;
1023 0 : }
1024 :
1025 : /* Try to get the paddr from /proc/self/pagemap */
1026 : static uint64_t
1027 0 : vtophys_get_paddr_pagemap(uint64_t vaddr)
1028 : {
1029 : uintptr_t paddr;
1030 :
1031 : /* Silence static analyzers */
1032 0 : assert(vaddr != 0);
1033 0 : paddr = rte_mem_virt2iova((void *)vaddr);
1034 0 : if (paddr == RTE_BAD_IOVA) {
1035 : /*
1036 : * The vaddr may be valid but doesn't have a backing page
1037 : * assigned yet. Touch the page to ensure a backing page
1038 : * gets assigned, then try to translate again.
1039 : */
1040 0 : rte_atomic64_read((rte_atomic64_t *)vaddr);
1041 0 : paddr = rte_mem_virt2iova((void *)vaddr);
1042 0 : }
1043 0 : if (paddr == RTE_BAD_IOVA) {
1044 : /* Unable to get to the physical address. */
1045 0 : return SPDK_VTOPHYS_ERROR;
1046 : }
1047 :
1048 0 : return paddr;
1049 0 : }
1050 :
1051 : static uint64_t
1052 0 : pci_device_vtophys(struct rte_pci_device *dev, uint64_t vaddr, size_t len)
1053 : {
1054 : struct rte_mem_resource *res;
1055 : uint64_t paddr;
1056 : unsigned r;
1057 :
1058 0 : for (r = 0; r < PCI_MAX_RESOURCE; r++) {
1059 0 : res = dpdk_pci_device_get_mem_resource(dev, r);
1060 :
1061 0 : if (res->phys_addr == 0 || vaddr < (uint64_t)res->addr ||
1062 0 : (vaddr + len) >= (uint64_t)res->addr + res->len) {
1063 0 : continue;
1064 : }
1065 :
1066 : #if VFIO_ENABLED
1067 : if (spdk_iommu_is_enabled() && rte_eal_iova_mode() == RTE_IOVA_VA) {
1068 : /*
1069 : * The IOMMU is on and we're using IOVA == VA. The BAR was
1070 : * automatically registered when it was mapped, so just return
1071 : * the virtual address here.
1072 : */
1073 : return vaddr;
1074 : }
1075 : #endif
1076 0 : paddr = res->phys_addr + (vaddr - (uint64_t)res->addr);
1077 0 : return paddr;
1078 : }
1079 :
1080 0 : return SPDK_VTOPHYS_ERROR;
1081 0 : }
1082 :
1083 : /* Try to get the paddr from pci devices */
1084 : static uint64_t
1085 0 : vtophys_get_paddr_pci(uint64_t vaddr, size_t len)
1086 : {
1087 : struct spdk_vtophys_pci_device *vtophys_dev;
1088 : uintptr_t paddr;
1089 : struct rte_pci_device *dev;
1090 :
1091 0 : pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
1092 0 : TAILQ_FOREACH(vtophys_dev, &g_vtophys_pci_devices, tailq) {
1093 0 : dev = vtophys_dev->pci_device;
1094 0 : paddr = pci_device_vtophys(dev, vaddr, len);
1095 0 : if (paddr != SPDK_VTOPHYS_ERROR) {
1096 0 : pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1097 0 : return paddr;
1098 : }
1099 0 : }
1100 0 : pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1101 :
1102 0 : return SPDK_VTOPHYS_ERROR;
1103 0 : }
1104 :
1105 : static int
1106 0 : vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
1107 : enum spdk_mem_map_notify_action action,
1108 : void *vaddr, size_t len)
1109 : {
1110 0 : int rc = 0;
1111 : uint64_t paddr;
1112 :
1113 0 : if ((uintptr_t)vaddr & ~MASK_256TB) {
1114 0 : DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
1115 0 : return -EINVAL;
1116 : }
1117 :
1118 0 : if (((uintptr_t)vaddr & MASK_2MB) || (len & MASK_2MB)) {
1119 0 : DEBUG_PRINT("invalid parameters, vaddr=%p len=%ju\n",
1120 : vaddr, len);
1121 0 : return -EINVAL;
1122 : }
1123 :
1124 : /* Get the physical address from the DPDK memsegs */
1125 0 : paddr = vtophys_get_paddr_memseg((uint64_t)vaddr);
1126 :
1127 0 : switch (action) {
1128 : case SPDK_MEM_MAP_NOTIFY_REGISTER:
1129 0 : if (paddr == SPDK_VTOPHYS_ERROR) {
1130 : /* This is not an address that DPDK is managing. */
1131 :
1132 : /* Check if this is a PCI BAR. They need special handling */
1133 0 : paddr = vtophys_get_paddr_pci((uint64_t)vaddr, len);
1134 0 : if (paddr != SPDK_VTOPHYS_ERROR) {
1135 : /* Get paddr for each 2MB chunk in this address range */
1136 0 : while (len > 0) {
1137 0 : paddr = vtophys_get_paddr_pci((uint64_t)vaddr, VALUE_2MB);
1138 0 : if (paddr == SPDK_VTOPHYS_ERROR) {
1139 0 : DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1140 0 : return -EFAULT;
1141 : }
1142 :
1143 0 : rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1144 0 : if (rc != 0) {
1145 0 : return rc;
1146 : }
1147 :
1148 0 : vaddr += VALUE_2MB;
1149 0 : len -= VALUE_2MB;
1150 : }
1151 :
1152 0 : return 0;
1153 : }
1154 :
1155 : #if VFIO_ENABLED
1156 : enum rte_iova_mode iova_mode;
1157 :
1158 : iova_mode = rte_eal_iova_mode();
1159 :
1160 : if (spdk_iommu_is_enabled() && iova_mode == RTE_IOVA_VA) {
1161 : /* We'll use the virtual address as the iova to match DPDK. */
1162 : paddr = (uint64_t)vaddr;
1163 : rc = vtophys_iommu_map_dma((uint64_t)vaddr, paddr, len);
1164 : if (rc) {
1165 : return -EFAULT;
1166 : }
1167 : while (len > 0) {
1168 : rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1169 : if (rc != 0) {
1170 : return rc;
1171 : }
1172 : vaddr += VALUE_2MB;
1173 : paddr += VALUE_2MB;
1174 : len -= VALUE_2MB;
1175 : }
1176 : } else
1177 : #endif
1178 : {
1179 : /* Get the physical address from /proc/self/pagemap. */
1180 0 : paddr = vtophys_get_paddr_pagemap((uint64_t)vaddr);
1181 0 : if (paddr == SPDK_VTOPHYS_ERROR) {
1182 0 : DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1183 0 : return -EFAULT;
1184 : }
1185 :
1186 : /* Get paddr for each 2MB chunk in this address range */
1187 0 : while (len > 0) {
1188 : /* Get the physical address from /proc/self/pagemap. */
1189 0 : paddr = vtophys_get_paddr_pagemap((uint64_t)vaddr);
1190 :
1191 0 : if (paddr == SPDK_VTOPHYS_ERROR) {
1192 0 : DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1193 0 : return -EFAULT;
1194 : }
1195 :
1196 0 : if (paddr & MASK_2MB) {
1197 0 : DEBUG_PRINT("invalid paddr 0x%" PRIx64 " - must be 2MB aligned\n", paddr);
1198 0 : return -EINVAL;
1199 : }
1200 : #if VFIO_ENABLED
1201 : /* If the IOMMU is on, but DPDK is using iova-mode=pa, we want to register this memory
1202 : * with the IOMMU using the physical address to match. */
1203 : if (spdk_iommu_is_enabled()) {
1204 : rc = vtophys_iommu_map_dma((uint64_t)vaddr, paddr, VALUE_2MB);
1205 : if (rc) {
1206 : DEBUG_PRINT("Unable to assign vaddr %p to paddr 0x%" PRIx64 "\n", vaddr, paddr);
1207 : return -EFAULT;
1208 : }
1209 : }
1210 : #endif
1211 :
1212 0 : rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1213 0 : if (rc != 0) {
1214 0 : return rc;
1215 : }
1216 :
1217 0 : vaddr += VALUE_2MB;
1218 0 : len -= VALUE_2MB;
1219 : }
1220 : }
1221 0 : } else {
1222 : /* This is an address managed by DPDK. Just setup the translations. */
1223 0 : while (len > 0) {
1224 0 : paddr = vtophys_get_paddr_memseg((uint64_t)vaddr);
1225 0 : if (paddr == SPDK_VTOPHYS_ERROR) {
1226 0 : DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1227 0 : return -EFAULT;
1228 : }
1229 :
1230 0 : rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, VALUE_2MB, paddr);
1231 0 : if (rc != 0) {
1232 0 : return rc;
1233 : }
1234 :
1235 0 : vaddr += VALUE_2MB;
1236 0 : len -= VALUE_2MB;
1237 : }
1238 : }
1239 :
1240 0 : break;
1241 : case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
1242 : #if VFIO_ENABLED
1243 : if (paddr == SPDK_VTOPHYS_ERROR) {
1244 : /*
1245 : * This is not an address that DPDK is managing.
1246 : */
1247 :
1248 : /* Check if this is a PCI BAR. They need special handling */
1249 : paddr = vtophys_get_paddr_pci((uint64_t)vaddr, len);
1250 : if (paddr != SPDK_VTOPHYS_ERROR) {
1251 : /* Get paddr for each 2MB chunk in this address range */
1252 : while (len > 0) {
1253 : paddr = vtophys_get_paddr_pci((uint64_t)vaddr, VALUE_2MB);
1254 : if (paddr == SPDK_VTOPHYS_ERROR) {
1255 : DEBUG_PRINT("could not get phys addr for %p\n", vaddr);
1256 : return -EFAULT;
1257 : }
1258 :
1259 : rc = spdk_mem_map_clear_translation(map, (uint64_t)vaddr, VALUE_2MB);
1260 : if (rc != 0) {
1261 : return rc;
1262 : }
1263 :
1264 : vaddr += VALUE_2MB;
1265 : len -= VALUE_2MB;
1266 : }
1267 :
1268 : return 0;
1269 : }
1270 :
1271 : /* If vfio is enabled,
1272 : * we need to unmap the range from the IOMMU
1273 : */
1274 : if (spdk_iommu_is_enabled()) {
1275 : uint64_t buffer_len = len;
1276 : uint8_t *va = vaddr;
1277 : enum rte_iova_mode iova_mode;
1278 :
1279 : iova_mode = rte_eal_iova_mode();
1280 : /*
1281 : * In virtual address mode, the region is contiguous and can be done in
1282 : * one unmap.
1283 : */
1284 : if (iova_mode == RTE_IOVA_VA) {
1285 : paddr = spdk_mem_map_translate(map, (uint64_t)va, &buffer_len);
1286 : if (buffer_len != len || paddr != (uintptr_t)va) {
1287 : DEBUG_PRINT("Unmapping %p with length %lu failed because "
1288 : "translation had address 0x%" PRIx64 " and length %lu\n",
1289 : va, len, paddr, buffer_len);
1290 : return -EINVAL;
1291 : }
1292 : rc = vtophys_iommu_unmap_dma(paddr, len);
1293 : if (rc) {
1294 : DEBUG_PRINT("Failed to iommu unmap paddr 0x%" PRIx64 "\n", paddr);
1295 : return -EFAULT;
1296 : }
1297 : } else if (iova_mode == RTE_IOVA_PA) {
1298 : /* Get paddr for each 2MB chunk in this address range */
1299 : while (buffer_len > 0) {
1300 : paddr = spdk_mem_map_translate(map, (uint64_t)va, NULL);
1301 :
1302 : if (paddr == SPDK_VTOPHYS_ERROR || buffer_len < VALUE_2MB) {
1303 : DEBUG_PRINT("could not get phys addr for %p\n", va);
1304 : return -EFAULT;
1305 : }
1306 :
1307 : rc = vtophys_iommu_unmap_dma(paddr, VALUE_2MB);
1308 : if (rc) {
1309 : DEBUG_PRINT("Failed to iommu unmap paddr 0x%" PRIx64 "\n", paddr);
1310 : return -EFAULT;
1311 : }
1312 :
1313 : va += VALUE_2MB;
1314 : buffer_len -= VALUE_2MB;
1315 : }
1316 : }
1317 : }
1318 : }
1319 : #endif
1320 0 : while (len > 0) {
1321 0 : rc = spdk_mem_map_clear_translation(map, (uint64_t)vaddr, VALUE_2MB);
1322 0 : if (rc != 0) {
1323 0 : return rc;
1324 : }
1325 :
1326 0 : vaddr += VALUE_2MB;
1327 0 : len -= VALUE_2MB;
1328 : }
1329 :
1330 0 : break;
1331 : default:
1332 0 : SPDK_UNREACHABLE();
1333 : }
1334 :
1335 0 : return rc;
1336 0 : }
1337 :
1338 : static int
1339 0 : numa_notify(void *cb_ctx, struct spdk_mem_map *map,
1340 : enum spdk_mem_map_notify_action action,
1341 : void *vaddr, size_t len)
1342 : {
1343 : struct rte_memseg *seg;
1344 :
1345 : /* We always return 0 from here, even if we aren't able to get a
1346 : * memseg for the address. This can happen in non-DPDK memory
1347 : * registration paths, for example vhost or vfio-user. That is OK,
1348 : * spdk_mem_get_numa_id() just returns SPDK_ENV_NUMA_ID_ANY for
1349 : * that kind of memory. If we return an error here, the
1350 : * spdk_mem_register() from vhost or vfio-user would fail which is
1351 : * not what we want.
1352 : */
1353 0 : seg = rte_mem_virt2memseg(vaddr, NULL);
1354 0 : if (seg == NULL) {
1355 0 : return 0;
1356 : }
1357 :
1358 0 : switch (action) {
1359 : case SPDK_MEM_MAP_NOTIFY_REGISTER:
1360 0 : spdk_mem_map_set_translation(map, (uint64_t)vaddr, len, seg->socket_id);
1361 0 : break;
1362 : case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
1363 0 : spdk_mem_map_clear_translation(map, (uint64_t)vaddr, len);
1364 0 : break;
1365 : default:
1366 0 : break;
1367 : }
1368 :
1369 0 : return 0;
1370 0 : }
1371 :
1372 : static int
1373 0 : vtophys_check_contiguous_entries(uint64_t paddr1, uint64_t paddr2)
1374 : {
1375 : /* This function is always called with paddrs for two adjacent
1376 : * 2MB chunks in virtual address space, so those chunks will only be
1377 : * physically contiguous if the physical addresses are 2MB apart
1378 : * from each other as well.
1379 : */
1380 0 : return (paddr2 - paddr1 == VALUE_2MB);
1381 : }
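/* Illustration (not part of the original source): only translations exactly
 * VALUE_2MB apart are merged by spdk_mem_map_translate(), e.g.
 *
 *	vtophys_check_contiguous_entries(0x100000000, 0x100200000);  // 1 (contiguous)
 *	vtophys_check_contiguous_entries(0x100000000, 0x140000000);  // 0 (not contiguous)
 */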
1382 :
1383 : #if VFIO_ENABLED
1384 :
1385 : static bool
1386 : vfio_enabled(void)
1387 : {
1388 : return rte_vfio_is_enabled("vfio_pci");
1389 : }
1390 :
1391 : /* Check if IOMMU is enabled on the system */
1392 : static bool
1393 : has_iommu_groups(void)
1394 : {
1395 : int count = 0;
1396 : DIR *dir = opendir("/sys/kernel/iommu_groups");
1397 :
1398 : if (dir == NULL) {
1399 : return false;
1400 : }
1401 :
1402 : while (count < 3 && readdir(dir) != NULL) {
1403 : count++;
1404 : }
1405 :
1406 : closedir(dir);
1407 : /* there will always be ./ and ../ entries */
1408 : return count > 2;
1409 : }
1410 :
1411 : static bool
1412 : vfio_noiommu_enabled(void)
1413 : {
1414 : return rte_vfio_noiommu_is_enabled();
1415 : }
1416 :
1417 : static void
1418 : vtophys_iommu_init(void)
1419 : {
1420 : char proc_fd_path[PATH_MAX + 1];
1421 : char link_path[PATH_MAX + 1];
1422 : const char vfio_path[] = "/dev/vfio/vfio";
1423 : DIR *dir;
1424 : struct dirent *d;
1425 :
1426 : if (!vfio_enabled()) {
1427 : return;
1428 : }
1429 :
1430 : if (vfio_noiommu_enabled()) {
1431 : g_vfio.noiommu_enabled = true;
1432 : } else if (!has_iommu_groups()) {
1433 : return;
1434 : }
1435 :
1436 : dir = opendir("/proc/self/fd");
1437 : if (!dir) {
1438 : DEBUG_PRINT("Failed to open /proc/self/fd (%d)\n", errno);
1439 : return;
1440 : }
1441 :
1442 : while ((d = readdir(dir)) != NULL) {
1443 : if (d->d_type != DT_LNK) {
1444 : continue;
1445 : }
1446 :
1447 : snprintf(proc_fd_path, sizeof(proc_fd_path), "/proc/self/fd/%s", d->d_name);
1448 : if (readlink(proc_fd_path, link_path, sizeof(link_path)) != (sizeof(vfio_path) - 1)) {
1449 : continue;
1450 : }
1451 :
1452 : if (memcmp(link_path, vfio_path, sizeof(vfio_path) - 1) == 0) {
1453 : sscanf(d->d_name, "%d", &g_vfio.fd);
1454 : break;
1455 : }
1456 : }
1457 :
1458 : closedir(dir);
1459 :
1460 : if (g_vfio.fd < 0) {
1461 : DEBUG_PRINT("Failed to discover DPDK VFIO container fd.\n");
1462 : return;
1463 : }
1464 :
1465 : g_vfio.enabled = true;
1466 :
1467 : return;
1468 : }
1469 :
1470 : #endif
1471 :
1472 : void
1473 0 : vtophys_pci_device_added(struct rte_pci_device *pci_device)
1474 : {
1475 : struct spdk_vtophys_pci_device *vtophys_dev;
1476 :
1477 0 : pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
1478 :
1479 0 : vtophys_dev = calloc(1, sizeof(*vtophys_dev));
1480 0 : if (vtophys_dev) {
1481 0 : vtophys_dev->pci_device = pci_device;
1482 0 : TAILQ_INSERT_TAIL(&g_vtophys_pci_devices, vtophys_dev, tailq);
1483 0 : } else {
1484 0 : DEBUG_PRINT("Memory allocation error\n");
1485 : }
1486 0 : pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1487 :
1488 : #if VFIO_ENABLED
1489 : struct spdk_vfio_dma_map *dma_map;
1490 : int ret;
1491 :
1492 : if (!g_vfio.enabled) {
1493 : return;
1494 : }
1495 :
1496 : pthread_mutex_lock(&g_vfio.mutex);
1497 : g_vfio.device_ref++;
1498 : if (g_vfio.device_ref > 1) {
1499 : pthread_mutex_unlock(&g_vfio.mutex);
1500 : return;
1501 : }
1502 :
1503 : /* This is the first SPDK device using DPDK vfio. This means that the first
1504 : * IOMMU group might have just been added to the DPDK vfio container.
1505 : * From this point on, it is certain that the memory can be mapped.
1506 : */
1507 : TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
1508 : ret = ioctl(g_vfio.fd, VFIO_IOMMU_MAP_DMA, &dma_map->map);
1509 : if (ret) {
1510 : DEBUG_PRINT("Cannot update DMA mapping, error %d\n", errno);
1511 : break;
1512 : }
1513 : }
1514 : pthread_mutex_unlock(&g_vfio.mutex);
1515 : #endif
1516 0 : }
1517 :
1518 : void
1519 0 : vtophys_pci_device_removed(struct rte_pci_device *pci_device)
1520 : {
1521 : struct spdk_vtophys_pci_device *vtophys_dev;
1522 :
1523 0 : pthread_mutex_lock(&g_vtophys_pci_devices_mutex);
1524 0 : TAILQ_FOREACH(vtophys_dev, &g_vtophys_pci_devices, tailq) {
1525 0 : if (vtophys_dev->pci_device == pci_device) {
1526 0 : TAILQ_REMOVE(&g_vtophys_pci_devices, vtophys_dev, tailq);
1527 0 : free(vtophys_dev);
1528 0 : break;
1529 : }
1530 0 : }
1531 0 : pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
1532 :
1533 : #if VFIO_ENABLED
1534 : struct spdk_vfio_dma_map *dma_map;
1535 : int ret;
1536 :
1537 : if (!g_vfio.enabled) {
1538 : return;
1539 : }
1540 :
1541 : pthread_mutex_lock(&g_vfio.mutex);
1542 : assert(g_vfio.device_ref > 0);
1543 : g_vfio.device_ref--;
1544 : if (g_vfio.device_ref > 0) {
1545 : pthread_mutex_unlock(&g_vfio.mutex);
1546 : return;
1547 : }
1548 :
1549 : /* This is the last SPDK device using DPDK vfio. If DPDK doesn't have
1550 : * any additional devices using its vfio container, all the mappings
1551 : * will be automatically removed by the Linux vfio driver. We unmap
1552 : * the memory manually to be able to easily re-map it later regardless
1553 : * of other, external factors.
1554 : */
1555 : TAILQ_FOREACH(dma_map, &g_vfio.maps, tailq) {
1556 : struct vfio_iommu_type1_dma_unmap unmap = {};
1557 : unmap.argsz = sizeof(unmap);
1558 : unmap.flags = 0;
1559 : unmap.iova = dma_map->map.iova;
1560 : unmap.size = dma_map->map.size;
1561 : ret = ioctl(g_vfio.fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
1562 : if (ret) {
1563 : DEBUG_PRINT("Cannot unmap DMA memory, error %d\n", errno);
1564 : break;
1565 : }
1566 : }
1567 : pthread_mutex_unlock(&g_vfio.mutex);
1568 : #endif
1569 0 : }
1570 :
1571 : int
1572 0 : vtophys_init(void)
1573 : {
1574 0 : const struct spdk_mem_map_ops vtophys_map_ops = {
1575 : .notify_cb = vtophys_notify,
1576 : .are_contiguous = vtophys_check_contiguous_entries,
1577 : };
1578 :
1579 0 : const struct spdk_mem_map_ops phys_ref_map_ops = {
1580 : .notify_cb = NULL,
1581 : .are_contiguous = NULL,
1582 : };
1583 :
1584 0 : const struct spdk_mem_map_ops numa_map_ops = {
1585 : .notify_cb = numa_notify,
1586 : .are_contiguous = NULL,
1587 : };
1588 :
1589 : #if VFIO_ENABLED
1590 : vtophys_iommu_init();
1591 : #endif
1592 :
1593 0 : g_phys_ref_map = spdk_mem_map_alloc(0, &phys_ref_map_ops, NULL);
1594 0 : if (g_phys_ref_map == NULL) {
1595 0 : DEBUG_PRINT("phys_ref map allocation failed.\n");
1596 0 : return -ENOMEM;
1597 : }
1598 :
1599 0 : g_numa_map = spdk_mem_map_alloc(SPDK_ENV_NUMA_ID_ANY, &numa_map_ops, NULL);
1600 0 : if (g_numa_map == NULL) {
1601 0 : DEBUG_PRINT("numa map allocation failed.\n");
1602 0 : spdk_mem_map_free(&g_phys_ref_map);
1603 0 : return -ENOMEM;
1604 : }
1605 :
1606 0 : if (g_huge_pages) {
1607 0 : g_vtophys_map = spdk_mem_map_alloc(SPDK_VTOPHYS_ERROR, &vtophys_map_ops, NULL);
1608 0 : if (g_vtophys_map == NULL) {
1609 0 : DEBUG_PRINT("vtophys map allocation failed\n");
1610 0 : spdk_mem_map_free(&g_numa_map);
1611 0 : spdk_mem_map_free(&g_phys_ref_map);
1612 0 : return -ENOMEM;
1613 : }
1614 0 : }
1615 0 : return 0;
1616 0 : }
1617 :
1618 : void
1619 0 : vtophys_fini(void)
1620 : {
1621 0 : spdk_mem_map_free(&g_vtophys_map);
1622 0 : spdk_mem_map_free(&g_numa_map);
1623 0 : spdk_mem_map_free(&g_phys_ref_map);
1624 0 : }
1625 :
1626 : uint64_t
1627 0 : spdk_vtophys(const void *buf, uint64_t *size)
1628 : {
1629 : uint64_t vaddr, paddr_2mb;
1630 :
1631 0 : if (!g_huge_pages) {
1632 0 : return SPDK_VTOPHYS_ERROR;
1633 : }
1634 :
1635 0 : vaddr = (uint64_t)buf;
1636 0 : paddr_2mb = spdk_mem_map_translate(g_vtophys_map, vaddr, size);
1637 :
1638 : /*
1639 : * SPDK_VTOPHYS_ERROR has all bits set, so if the lookup returned SPDK_VTOPHYS_ERROR,
1640 : * we will still bitwise-or it with the buf offset below, but the result will still be
1641 : * SPDK_VTOPHYS_ERROR. However, now that we do + rather than | (due to PCI vtophys being
1642 : * unaligned), we must check the return value before the addition.
1643 : */
1644 : SPDK_STATIC_ASSERT(SPDK_VTOPHYS_ERROR == UINT64_C(-1), "SPDK_VTOPHYS_ERROR should be all 1s");
1645 0 : if (paddr_2mb == SPDK_VTOPHYS_ERROR) {
1646 0 : return SPDK_VTOPHYS_ERROR;
1647 : } else {
1648 0 : return paddr_2mb + (vaddr & MASK_2MB);
1649 : }
1650 0 : }
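/* Usage sketch (illustrative only): translate a buffer and check for failure before
 * handing the address to hardware; buf and buf_len are hypothetical.
 *
 *	uint64_t len = buf_len;
 *	uint64_t phys = spdk_vtophys(buf, &len);
 *	if (phys == SPDK_VTOPHYS_ERROR) {
 *		// buffer is not registered or not translatable
 *	}
 *	// on success, len holds the physically contiguous length starting at buf
 */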
1651 :
1652 : int32_t
1653 0 : spdk_mem_get_numa_id(const void *buf, uint64_t *size)
1654 : {
1655 0 : return spdk_mem_map_translate(g_numa_map, (uint64_t)buf, size);
1656 : }
1657 :
1658 : int
1659 0 : spdk_mem_get_fd_and_offset(void *vaddr, uint64_t *offset)
1660 : {
1661 : struct rte_memseg *seg;
1662 : int ret, fd;
1663 :
1664 0 : seg = rte_mem_virt2memseg(vaddr, NULL);
1665 0 : if (!seg) {
1666 0 : SPDK_ERRLOG("memory %p doesn't exist\n", vaddr);
1667 0 : return -ENOENT;
1668 : }
1669 :
1670 0 : fd = rte_memseg_get_fd_thread_unsafe(seg);
1671 0 : if (fd < 0) {
1672 0 : return fd;
1673 : }
1674 :
1675 0 : ret = rte_memseg_get_fd_offset_thread_unsafe(seg, offset);
1676 0 : if (ret < 0) {
1677 0 : return ret;
1678 : }
1679 :
1680 0 : return fd;
1681 0 : }
1682 :
1683 : void
1684 0 : mem_disable_huge_pages(void)
1685 : {
1686 0 : g_huge_pages = false;
1687 0 : }