Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2022 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : /*
7 : * virtio over vfio-user common library
8 : */
9 : #include "spdk/env.h"
10 : #include "spdk/bdev.h"
11 : #include "spdk/bdev_module.h"
12 : #include "spdk/stdinc.h"
13 : #include "spdk/assert.h"
14 : #include "spdk/barrier.h"
15 : #include "spdk/thread.h"
16 : #include "spdk/memory.h"
17 : #include "spdk/util.h"
18 : #include "spdk/log.h"
19 : #include "spdk/string.h"
20 : #include "spdk/likely.h"
21 :
22 : #include "vfu_virtio_internal.h"
23 :
24 : static int vfu_virtio_dev_start(struct vfu_virtio_dev *dev);
25 : static int vfu_virtio_dev_stop(struct vfu_virtio_dev *dev);
26 :
27 : static inline void
28 0 : vfu_virtio_unmap_q(struct vfu_virtio_dev *dev, struct q_mapping *mapping)
29 : {
30 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
31 :
32 0 : if (mapping->addr != NULL) {
33 0 : spdk_vfu_unmap_sg(virtio_endpoint->endpoint, mapping->sg,
34 0 : &mapping->iov, 1);
35 0 : mapping->addr = NULL;
36 0 : mapping->len = 0;
37 0 : }
38 0 : }
39 :
40 : static inline int
41 0 : vfu_virtio_map_q(struct vfu_virtio_dev *dev, struct q_mapping *mapping, uint64_t phys_addr,
42 : uint64_t len)
43 : {
44 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
45 0 : void *addr;
46 :
47 0 : if (!mapping->addr && len && phys_addr) {
48 0 : addr = spdk_vfu_map_one(virtio_endpoint->endpoint, phys_addr, len,
49 0 : mapping->sg, &mapping->iov, PROT_READ | PROT_WRITE);
50 0 : if (addr == NULL) {
51 0 : return -EINVAL;
52 : }
53 0 : mapping->phys_addr = phys_addr;
54 0 : mapping->len = len;
55 0 : mapping->addr = addr;
56 0 : }
57 :
58 0 : return 0;
59 0 : }
60 :
/* Map the three rings of a virtqueue (descriptor table, available ring,
 * used ring) from guest-physical memory into the host address space and
 * activate the queue.  Does nothing when the queue is disabled or already
 * active.  On partial failure, every ring mapped so far is unmapped again.
 * Returns 0 on success or the error code of the failing mapping.
 */
static int
virtio_dev_map_vq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	int ret;
	uint64_t phys_addr, len;

	if (!vq->enabled || (vq->q_state == VFU_VQ_ACTIVE)) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio, "%s: try to map vq %u\n", dev->name, vq->id);

	/* Descriptor table; the guest address is split across two 32-bit
	 * PCI common-config registers.
	 */
	len = virtio_queue_desc_size(dev, vq);
	phys_addr = ((((uint64_t)vq->desc_hi) << 32) | vq->desc_lo);
	ret = vfu_virtio_map_q(dev, &vq->desc, phys_addr, len);
	if (ret) {
		SPDK_DEBUGLOG(vfu_virtio, "Error to map descs\n");
		return ret;
	}

	/* Available (driver) ring. */
	len = virtio_queue_avail_size(dev, vq);
	phys_addr = ((((uint64_t)vq->avail_hi) << 32) | vq->avail_lo);
	ret = vfu_virtio_map_q(dev, &vq->avail, phys_addr, len);
	if (ret) {
		vfu_virtio_unmap_q(dev, &vq->desc);
		SPDK_DEBUGLOG(vfu_virtio, "Error to map available ring\n");
		return ret;
	}

	/* Used (device) ring. */
	len = virtio_queue_used_size(dev, vq);
	phys_addr = ((((uint64_t)vq->used_hi) << 32) | vq->used_lo);
	ret = vfu_virtio_map_q(dev, &vq->used, phys_addr, len);
	if (ret) {
		vfu_virtio_unmap_q(dev, &vq->desc);
		vfu_virtio_unmap_q(dev, &vq->avail);
		SPDK_DEBUGLOG(vfu_virtio, "Error to map used ring\n");
		return ret;
	}

	/* We're running with polling mode, so ask the driver to suppress
	 * doorbell notifications.
	 */
	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		vq->used.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
	} else {
		vq->used.used->flags = VRING_USED_F_NO_NOTIFY;
	}

	SPDK_DEBUGLOG(vfu_virtio, "%s: map vq %u successfully\n", dev->name, vq->id);
	vq->q_state = VFU_VQ_ACTIVE;

	return 0;
}
112 :
113 : static void
114 0 : virtio_dev_unmap_vq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
115 : {
116 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: unmap vq %u\n", dev->name, vq->id);
117 0 : vq->q_state = VFU_VQ_INACTIVE;
118 :
119 0 : vfu_virtio_unmap_q(dev, &vq->desc);
120 0 : vfu_virtio_unmap_q(dev, &vq->avail);
121 0 : vfu_virtio_unmap_q(dev, &vq->used);
122 0 : }
123 :
124 : static bool
125 0 : vfu_virtio_vq_should_unmap(struct vfu_virtio_vq *vq, void *map_start, void *map_end)
126 : {
127 : /* always do unmap when stopping the device */
128 0 : if (!map_start || !map_end) {
129 0 : return true;
130 : }
131 :
132 0 : if (vq->desc.addr >= map_start && vq->desc.addr < map_end) {
133 0 : return true;
134 : }
135 :
136 0 : if (vq->avail.addr >= map_start && vq->avail.addr < map_end) {
137 0 : return true;
138 : }
139 :
140 0 : if (vq->used.addr >= map_start && vq->used.addr < map_end) {
141 0 : return true;
142 : }
143 :
144 0 : return false;
145 0 : }
146 :
147 : static void
148 0 : vfu_virtio_dev_unmap_vqs(struct vfu_virtio_dev *dev, void *map_start, void *map_end)
149 : {
150 0 : uint32_t i;
151 0 : struct vfu_virtio_vq *vq;
152 :
153 0 : for (i = 0; i < dev->num_queues; i++) {
154 0 : vq = &dev->vqs[i];
155 0 : if (!vq->enabled) {
156 0 : continue;
157 : }
158 :
159 0 : if (!vfu_virtio_vq_should_unmap(vq, map_start, map_end)) {
160 0 : continue;
161 : }
162 0 : virtio_dev_unmap_vq(dev, vq);
163 0 : }
164 0 : }
165 :
166 : /* This function is used to notify VM that the device
167 : * configuration space has been changed.
168 : */
169 : void
170 0 : vfu_virtio_notify_config(struct vfu_virtio_endpoint *virtio_endpoint)
171 : {
172 0 : struct spdk_vfu_endpoint *endpoint = virtio_endpoint->endpoint;
173 :
174 0 : if (virtio_endpoint->dev == NULL) {
175 0 : return;
176 : }
177 :
178 0 : virtio_endpoint->dev->cfg.isr = 1;
179 0 : virtio_endpoint->dev->cfg.config_generation++;
180 :
181 0 : vfu_irq_trigger(spdk_vfu_get_vfu_ctx(endpoint), virtio_endpoint->dev->cfg.msix_config);
182 0 : }
183 :
184 : static void
185 0 : vfu_virtio_dev_reset(struct vfu_virtio_dev *dev)
186 : {
187 0 : uint32_t i;
188 0 : struct vfu_virtio_vq *vq;
189 :
190 0 : SPDK_DEBUGLOG(vfu_virtio, "device %s resetting\n", dev->name);
191 :
192 0 : for (i = 0; i < dev->num_queues; i++) {
193 0 : vq = &dev->vqs[i];
194 :
195 0 : vq->q_state = VFU_VQ_CREATED;
196 0 : vq->vector = 0;
197 0 : vq->enabled = false;
198 0 : vq->last_avail_idx = 0;
199 0 : vq->last_used_idx = 0;
200 :
201 0 : vq->packed.packed_ring = false;
202 0 : vq->packed.avail_phase = 0;
203 0 : vq->packed.used_phase = 0;
204 0 : }
205 :
206 0 : memset(&dev->cfg, 0, sizeof(struct virtio_pci_cfg));
207 0 : }
208 :
209 : static int
210 0 : virtio_dev_set_status(struct vfu_virtio_dev *dev, uint8_t status)
211 : {
212 0 : int ret = 0;
213 :
214 0 : SPDK_DEBUGLOG(vfu_virtio, "device current status %x, set status %x\n", dev->cfg.device_status,
215 : status);
216 :
217 0 : if (!(virtio_dev_is_started(dev))) {
218 0 : if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
219 0 : ret = vfu_virtio_dev_start(dev);
220 0 : }
221 0 : } else {
222 0 : if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
223 0 : ret = vfu_virtio_dev_stop(dev);
224 0 : }
225 : }
226 :
227 0 : if (ret) {
228 0 : SPDK_ERRLOG("Failed to start/stop device\n");
229 0 : return ret;
230 : }
231 :
232 0 : dev->cfg.device_status = status;
233 :
234 0 : if (status == 0) {
235 0 : vfu_virtio_dev_reset(dev);
236 0 : }
237 :
238 0 : return 0;
239 0 : }
240 :
241 : static int
242 0 : virtio_dev_set_features(struct vfu_virtio_dev *dev, uint64_t features)
243 : {
244 0 : if (dev->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK) {
245 0 : SPDK_ERRLOG("Feature negotiation has finished\n");
246 0 : return -EINVAL;
247 : }
248 :
249 0 : if (features & ~dev->host_features) {
250 0 : SPDK_ERRLOG("Host features 0x%"PRIx64", guest features 0x%"PRIx64"\n",
251 : dev->host_features, features);
252 0 : return -ENOTSUP;
253 : }
254 :
255 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: negotiated features 0x%"PRIx64"\n", dev->name,
256 : features);
257 0 : dev->cfg.guest_features = features;
258 :
259 0 : return 0;
260 0 : }
261 :
/* Enable virtqueue 'qid' after the driver wrote 1 to Q_ENABLE.
 * Marks the queue enabled, attempts to map its rings, resets the
 * split-ring indices, and initializes packed-ring state when
 * VIRTIO_F_RING_PACKED was negotiated.
 * Returns -EINVAL if the queue is already enabled, 0 otherwise.
 */
static int
virtio_dev_enable_vq(struct vfu_virtio_dev *dev, uint16_t qid)
{
	struct vfu_virtio_vq *vq;

	SPDK_DEBUGLOG(vfu_virtio, "%s: enable vq %u\n", dev->name, qid);

	vq = &dev->vqs[qid];
	if (vq->enabled) {
		SPDK_ERRLOG("Queue %u is enabled\n", qid);
		return -EINVAL;
	}
	vq->enabled = true;

	if (virtio_dev_map_vq(dev, vq)) {
		SPDK_ERRLOG("Queue %u failed to map\n", qid);
		/* NOTE(review): mapping failure still returns success and the
		 * queue stays marked enabled — presumably the mapping is
		 * retried later (e.g. once guest memory regions arrive);
		 * confirm this is intentional.
		 */
		return 0;
	}

	/* Fresh queue: both shadow indices and the shared ring indices
	 * start at zero.
	 */
	vq->avail.avail->idx = 0;
	vq->last_avail_idx = 0;
	vq->used.used->idx = 0;
	vq->last_used_idx = 0;

	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		SPDK_DEBUGLOG(vfu_virtio, "%s: vq %u PACKED RING ENABLED\n", dev->name, qid);
		/* Packed ring starts with both wrap counters set. */
		vq->packed.packed_ring = true;
		vq->packed.avail_phase = true;
		vq->packed.used_phase = true;
	}

	return 0;
}
295 :
296 : static int
297 0 : virtio_dev_disable_vq(struct vfu_virtio_dev *dev, uint16_t qid)
298 : {
299 0 : struct vfu_virtio_vq *vq;
300 :
301 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: disable vq %u\n", dev->name, qid);
302 :
303 0 : vq = &dev->vqs[qid];
304 0 : if (!vq->enabled) {
305 0 : SPDK_NOTICELOG("Queue %u isn't enabled\n", qid);
306 0 : return 0;
307 : }
308 :
309 0 : virtio_dev_unmap_vq(dev, vq);
310 :
311 0 : vq->q_state = VFU_VQ_CREATED;
312 0 : vq->vector = 0;
313 0 : vq->enabled = false;
314 0 : vq->last_avail_idx = 0;
315 0 : vq->last_used_idx = 0;
316 0 : vq->packed.packed_ring = false;
317 0 : vq->packed.avail_phase = 0;
318 0 : vq->packed.used_phase = 0;
319 :
320 0 : return 0;
321 0 : }
322 :
/* Harvest up to 'max_reqs' available descriptor-chain heads from a split
 * vring into 'reqs', advancing vq->last_avail_idx past them.
 * Returns the number of heads fetched (0 when the ring is empty).
 */
static int
virtio_dev_split_get_avail_reqs(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
				uint16_t *reqs, uint16_t max_reqs)
{
	uint16_t count, i, avail_idx, last_idx;

	last_idx = vq->last_avail_idx;
	avail_idx = vq->avail.avail->idx;

	/* Read barrier: don't read ring entries before the snapshot of the
	 * driver-published avail->idx above.
	 */
	spdk_smp_rmb();

	/* Indices are free-running 16-bit counters; unsigned subtraction
	 * yields the element count across wrap-around.
	 */
	count = avail_idx - last_idx;
	if (count == 0) {
		return 0;
	}

	count = spdk_min(count, max_reqs);
	vq->last_avail_idx += count;

	for (i = 0; i < count; i++) {
		/* Mask maps the free-running index to a ring slot — assumes
		 * qsize is a power of two (TODO confirm enforced at setup).
		 */
		reqs[i] = vq->avail.avail->ring[(last_idx + i) & (vq->qsize - 1)];
	}

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "AVAIL: vq %u last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
		      vq->id, last_idx, avail_idx, count);

	return count;
}
352 :
353 : static int
354 0 : virtio_vring_split_desc_get_next(struct vring_desc **desc,
355 : struct vring_desc *desc_table,
356 : uint32_t desc_table_size)
357 : {
358 0 : struct vring_desc *old_desc = *desc;
359 0 : uint16_t next_idx;
360 :
361 0 : if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
362 0 : *desc = NULL;
363 0 : return 0;
364 : }
365 :
366 0 : next_idx = old_desc->next;
367 0 : if (spdk_unlikely(next_idx >= desc_table_size)) {
368 0 : *desc = NULL;
369 0 : return -1;
370 : }
371 :
372 0 : *desc = &desc_table[next_idx];
373 0 : return 0;
374 0 : }
375 :
376 : static inline void *
377 0 : virtio_vring_desc_to_iov(struct vfu_virtio_dev *dev, struct vring_desc *desc,
378 : dma_sg_t *sg, struct iovec *iov)
379 : {
380 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
381 :
382 0 : return spdk_vfu_map_one(virtio_endpoint->endpoint, desc->addr, desc->len,
383 0 : sg, iov, PROT_READ | PROT_WRITE);
384 0 : }
385 :
/* Resolve the descriptor table for the chain that starts at 'desc_idx'.
 * For an indirect descriptor the referenced table is mapped into host
 * memory via 'sg'/'iov'; *desc_table then points at the mapped table and
 * *desc at its first entry.  Otherwise the queue's own descriptor ring
 * serves as the table and *desc points at the head descriptor.
 * Returns 0 on success, -EINVAL when the indirect table cannot be mapped.
 */
static int
virtio_split_vring_get_desc(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq, uint16_t desc_idx,
			    struct vring_desc **desc, struct vring_desc **desc_table,
			    uint32_t *desc_table_size,
			    dma_sg_t *sg, struct iovec *iov)
{
	*desc = &vq->desc.desc[desc_idx];

	if (virtio_vring_split_desc_is_indirect(*desc)) {
		/* Table length in bytes is carried in the head descriptor. */
		*desc_table_size = (*desc)->len / sizeof(struct vring_desc);
		*desc_table = virtio_vring_desc_to_iov(dev, *desc, sg, iov);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -EINVAL;
		}
		return 0;
	}

	*desc_table = vq->desc.desc;
	*desc_table_size = vq->qsize;

	return 0;
}
409 :
410 : static inline dma_sg_t *
411 0 : virtio_req_to_sg_t(struct vfu_virtio_req *req, uint32_t iovcnt)
412 : {
413 0 : return (dma_sg_t *)(req->sg + iovcnt * dma_sg_size());
414 : }
415 :
416 : static inline struct vfu_virtio_req *
417 0 : vfu_virtio_dev_get_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
418 : {
419 0 : struct vfu_virtio_req *req;
420 :
421 0 : req = STAILQ_FIRST(&vq->free_reqs);
422 0 : if (req == NULL) {
423 0 : return NULL;
424 : }
425 0 : STAILQ_REMOVE_HEAD(&vq->free_reqs, link);
426 :
427 0 : req->iovcnt = 0;
428 0 : req->used_len = 0;
429 0 : req->payload_size = 0;
430 0 : req->req_idx = 0;
431 0 : req->buffer_id = 0;
432 0 : req->num_descs = 0;
433 :
434 0 : return req;
435 0 : }
436 :
437 : void
438 0 : vfu_virtio_dev_put_req(struct vfu_virtio_req *req)
439 : {
440 0 : struct vfu_virtio_dev *dev = req->dev;
441 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
442 0 : vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);
443 :
444 0 : if (req->indirect_iov->iov_base) {
445 0 : vfu_sgl_put(vfu_ctx, req->indirect_sg, req->indirect_iov, 1);
446 0 : req->indirect_iov->iov_base = NULL;
447 0 : req->indirect_iov->iov_len = 0;
448 0 : }
449 :
450 0 : if (req->iovcnt) {
451 0 : vfu_sgl_put(vfu_ctx, virtio_req_to_sg_t(req, 0), req->iovs, req->iovcnt);
452 0 : req->iovcnt = 0;
453 0 : }
454 :
455 0 : STAILQ_INSERT_HEAD(&req->vq->free_reqs, req, link);
456 0 : }
457 :
458 : void
459 0 : vfu_virtio_finish_req(struct vfu_virtio_req *req)
460 : {
461 0 : struct vfu_virtio_dev *dev = req->dev;
462 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
463 :
464 0 : assert(virtio_endpoint->io_outstanding);
465 0 : virtio_endpoint->io_outstanding--;
466 :
467 0 : if (!virtio_guest_has_feature(req->dev, VIRTIO_F_RING_PACKED)) {
468 0 : virtio_vq_used_ring_split_enqueue(req->vq, req->req_idx, req->used_len);
469 0 : } else {
470 0 : virtio_vq_used_ring_packed_enqueue(req->vq, req->buffer_id, req->num_descs, req->used_len);
471 : }
472 :
473 0 : vfu_virtio_dev_put_req(req);
474 0 : }
475 :
476 : static inline void
477 0 : vfu_virtio_dev_free_reqs(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_dev *dev)
478 : {
479 0 : struct vfu_virtio_req *req;
480 0 : struct vfu_virtio_vq *vq;
481 0 : uint32_t i;
482 :
483 0 : for (i = 0; i < dev->num_queues; i++) {
484 0 : vq = &dev->vqs[i];
485 0 : while (!STAILQ_EMPTY(&vq->free_reqs)) {
486 0 : req = STAILQ_FIRST(&vq->free_reqs);
487 0 : STAILQ_REMOVE_HEAD(&vq->free_reqs, link);
488 0 : vfu_virtio_vq_free_req(virtio_endpoint, vq, req);
489 : }
490 0 : }
491 0 : }
492 :
/* Translate the split-ring descriptor chain headed by 'desc_idx' into
 * req->iovs[]: each descriptor's guest buffer is mapped into host memory,
 * its writability recorded in req->desc_writeable[], and the summed chain
 * length stored in req->payload_size.
 * Returns 0 on success, negative errno on mapping or chain errors.
 */
static int
virtio_dev_split_iovs_setup(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
			    uint16_t desc_idx, struct vfu_virtio_req *req)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size, len = 0;
	uint32_t desc_handled_cnt = 0;
	int rc;

	rc = virtio_split_vring_get_desc(dev, vq, desc_idx, &desc,
					 &desc_table, &desc_table_size,
					 req->indirect_sg, req->indirect_iov);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Invalid descriptor at index %"PRIu16".\n", desc_idx);
		return rc;
	}

	assert(req->iovcnt == 0);

	while (true) {
		/* Map this descriptor's buffer into the next IOV slot. */
		if (spdk_unlikely(!virtio_vring_desc_to_iov(dev, desc, virtio_req_to_sg_t(req, req->iovcnt),
				  &req->iovs[req->iovcnt]))) {
			return -EINVAL;
		}
		req->desc_writeable[req->iovcnt] = false;
		if (virtio_vring_split_desc_is_wr(desc)) {
			req->desc_writeable[req->iovcnt] = true;
		}

		req->iovcnt++;
		len += desc->len;

		rc = virtio_vring_split_desc_get_next(&desc, desc_table, desc_table_size);
		if (spdk_unlikely(rc)) {
			return rc;
		} else if (desc == NULL) {
			/* end of chain */
			break;
		}

		/* Guard against looping/corrupt chains: a valid chain can
		 * never exceed the descriptor table size.
		 */
		desc_handled_cnt++;
		if (spdk_unlikely(desc_handled_cnt > desc_table_size)) {
			return -EINVAL;
		}
	}

	req->payload_size = len;

	return 0;
}
542 :
/* Post one completion to the split used ring and publish the new used
 * index to the driver.  'req_idx' is the head index of the finished chain,
 * 'used_len' the number of bytes the device wrote.
 */
void
virtio_vq_used_ring_split_enqueue(struct vfu_virtio_vq *vq, uint16_t req_idx, uint32_t used_len)
{
	/* Free-running index masked to a ring slot (qsize power of two). */
	uint16_t last_idx = vq->last_used_idx & (vq->qsize - 1);

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "Queue %u - USED RING: last_idx=%"PRIu16" req_idx=%"PRIu16" used_len=%"PRIu32"\n",
		      vq->id, last_idx, req_idx, used_len);

	vq->used.used->ring[last_idx].id = req_idx;
	vq->used.used->ring[last_idx].len = used_len;
	vq->last_used_idx++;

	/* Write barrier: the ring entry must be visible to the driver
	 * before the index update below.
	 */
	spdk_smp_wmb();

	/* volatile store so the compiler cannot elide/defer publishing idx */
	*(volatile uint16_t *)&vq->used.used->idx = vq->last_used_idx;

	/* accumulated completions drive interrupt coalescing in flush_irq */
	vq->used_req_cnt++;
}
562 :
/* Post one completion to a packed virtqueue: write the used length and
 * buffer id into the descriptor at last_used_idx, then flip its
 * AVAIL/USED flag bits to match the device ring wrap counter, toggling
 * the counter when the index wraps past qsize.
 */
void
virtio_vq_used_ring_packed_enqueue(struct vfu_virtio_vq *vq, uint16_t buffer_id, uint32_t num_descs,
				   uint32_t used_len)
{
	struct vring_packed_desc *desc = &vq->desc.desc_packed[vq->last_used_idx];

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "Queue %u - USED RING: buffer_id=%"PRIu16" num_descs=%u used_len=%"PRIu32"\n",
		      vq->id, buffer_id, num_descs, used_len);

	if (spdk_unlikely(virtio_vring_packed_is_used(desc, vq->packed.used_phase))) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In used desc addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = used_len;

	/* This bit specifies whether any data has been written by the device */
	if (used_len != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (vq->packed.used_phase) {
		desc->flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
		desc->flags |= (1 << VRING_PACKED_DESC_F_USED);
	} else {
		desc->flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
		desc->flags &= ~(1 << VRING_PACKED_DESC_F_USED);
	}

	/* Advance past the whole chain; toggle wrap counter on wrap-around. */
	vq->last_used_idx += num_descs;
	if (vq->last_used_idx >= vq->qsize) {
		vq->last_used_idx -= vq->qsize;
		vq->packed.used_phase = !vq->packed.used_phase;
	}

	vq->used_req_cnt++;
}
620 :
621 : static int
622 0 : vfu_virtio_vq_post_irq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
623 : {
624 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
625 0 : vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);
626 :
627 0 : vq->used_req_cnt = 0;
628 :
629 0 : if (spdk_vfu_endpoint_msix_enabled(virtio_endpoint->endpoint)) {
630 0 : SPDK_DEBUGLOG(vfu_virtio_io, "%s: Queue %u post MSIX IV %u\n",
631 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
632 : vq->id, vq->vector);
633 0 : return vfu_irq_trigger(vfu_ctx, vq->vector);
634 : } else {
635 0 : if (!spdk_vfu_endpoint_intx_enabled(virtio_endpoint->endpoint)) {
636 0 : SPDK_DEBUGLOG(vfu_virtio_io, "%s: IRQ disabled\n",
637 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
638 0 : return 0;
639 : }
640 :
641 0 : SPDK_DEBUGLOG(vfu_virtio_io, "%s: Queue %u post ISR\n",
642 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), vq->id);
643 0 : dev->cfg.isr = 1;
644 0 : return vfu_irq_trigger(vfu_ctx, 0);
645 : }
646 0 : }
647 :
648 : void
649 0 : vfu_virtio_vq_flush_irq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
650 : {
651 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
652 0 : uint32_t delay_us;
653 :
654 0 : if (vq->used_req_cnt == 0) {
655 0 : return;
656 : }
657 :
658 : /* No need to notify client */
659 0 : if (virtio_queue_event_is_suppressed(dev, vq)) {
660 0 : return;
661 : }
662 :
663 : /* Interrupt coalescing disabled */
664 0 : if (!virtio_endpoint->coalescing_delay_us) {
665 0 : vfu_virtio_vq_post_irq(dev, vq);
666 0 : return;
667 : }
668 :
669 : /* No need for event right now */
670 0 : if (spdk_get_ticks() < vq->next_event_time) {
671 0 : return;
672 : }
673 :
674 0 : vfu_virtio_vq_post_irq(dev, vq);
675 :
676 0 : delay_us = virtio_endpoint->coalescing_delay_us;
677 0 : vq->next_event_time = spdk_get_ticks() + delay_us * spdk_get_ticks_hz() / (1000000ULL);
678 0 : }
679 :
/* Poll one split virtqueue: harvest up to VIRTIO_DEV_VRING_MAX_REQS
 * available descriptor heads, build a request for each and hand it to the
 * backend's exec_request callback.
 * Returns the number of requests submitted, or -EIO when the request pool
 * runs dry.
 */
int
vfu_virtio_dev_process_split_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vfu_virtio_req *req;
	uint16_t reqs_idx[VIRTIO_DEV_VRING_MAX_REQS];
	uint16_t reqs_cnt, i;
	int ret;

	reqs_cnt = virtio_dev_split_get_avail_reqs(dev, vq, reqs_idx, VIRTIO_DEV_VRING_MAX_REQS);
	if (!reqs_cnt) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: get %u descriptors\n", dev->name, reqs_cnt);

	for (i = 0; i < reqs_cnt; i++) {
		req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
		if (spdk_unlikely(!req)) {
			SPDK_ERRLOG("Error to get request\n");
			/* TODO: address the error case */
			return -EIO;
		}

		req->req_idx = reqs_idx[i];
		ret = virtio_dev_split_iovs_setup(dev, vq, req->req_idx, req);
		if (spdk_unlikely(ret)) {
			/* let the device to response this error: the request is
			 * still submitted below so the backend can fail it.
			 */
			SPDK_ERRLOG("Split vring setup failed with index %u\n", i);
		}

		assert(virtio_endpoint->virtio_ops.exec_request);
		virtio_endpoint->io_outstanding++;
		virtio_endpoint->virtio_ops.exec_request(virtio_endpoint, vq, req);
	}

	return i;
}
718 :
719 : struct vfu_virtio_req *
720 0 : virito_dev_split_ring_get_next_avail_req(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
721 : {
722 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
723 0 : struct vfu_virtio_req *req;
724 0 : uint16_t reqs_idx[VIRTIO_DEV_VRING_MAX_REQS];
725 0 : uint16_t reqs_cnt;
726 0 : int ret;
727 :
728 0 : reqs_cnt = virtio_dev_split_get_avail_reqs(dev, vq, reqs_idx, 1);
729 0 : if (!reqs_cnt) {
730 0 : return NULL;
731 : }
732 0 : assert(reqs_cnt == 1);
733 :
734 0 : SPDK_DEBUGLOG(vfu_virtio_io, "%s: get 1 descriptors\n", dev->name);
735 :
736 0 : req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
737 0 : if (!req) {
738 0 : SPDK_ERRLOG("Error to get request\n");
739 0 : return NULL;
740 : }
741 :
742 0 : req->req_idx = reqs_idx[0];
743 0 : ret = virtio_dev_split_iovs_setup(dev, vq, req->req_idx, req);
744 0 : if (ret) {
745 0 : SPDK_ERRLOG("Split vring setup failed\n");
746 0 : vfu_virtio_dev_put_req(req);
747 0 : return NULL;
748 : }
749 :
750 0 : return req;
751 0 : }
752 :
753 : static inline void *
754 0 : virtio_vring_packed_desc_to_iov(struct vfu_virtio_dev *dev, struct vring_packed_desc *desc,
755 : dma_sg_t *sg, struct iovec *iov)
756 : {
757 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
758 :
759 0 : return spdk_vfu_map_one(virtio_endpoint->endpoint, desc->addr, desc->len,
760 0 : sg, iov, PROT_READ | PROT_WRITE);
761 0 : }
762 :
/* Translate the packed-ring descriptor chain starting at 'last_avail_idx'
 * (head descriptor 'current_desc') into req->iovs[].  Handles both
 * indirect tables and in-ring chains; records per-descriptor writability,
 * total payload size, the chain's buffer id and descriptor count, and
 * advances vq->last_avail_idx (toggling the driver-ring wrap counter on
 * wrap-around).
 * Returns 0 on success, -EINVAL on mapping failure or IOV overflow.
 */
static int
virtio_dev_packed_iovs_setup(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
			     uint16_t last_avail_idx,
			     struct vring_packed_desc *current_desc, struct vfu_virtio_req *req)
{
	struct vring_packed_desc *desc, *desc_table = NULL;
	uint16_t new_idx, num_descs, desc_table_size = 0;
	uint32_t len = 0;

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: last avail idx %u, req %p\n", dev->name, last_avail_idx, req);

	desc = NULL;
	num_descs = 1;
	if (virtio_vring_packed_desc_is_indirect(current_desc)) {
		/* Indirect: map the external table; buffer id comes from the
		 * head descriptor.
		 */
		req->buffer_id = current_desc->id;
		desc_table = virtio_vring_packed_desc_to_iov(dev, current_desc, req->indirect_sg,
				req->indirect_iov);
		if (spdk_unlikely(desc_table == NULL)) {
			SPDK_ERRLOG("Map Indirect Desc to IOV failed\n");
			return -EINVAL;
		}
		desc_table_size = current_desc->len / sizeof(struct vring_packed_desc);
		desc = desc_table;
		SPDK_DEBUGLOG(vfu_virtio_io, "%s: indirect desc %p, desc size %u, req %p\n",
			      dev->name, desc_table, desc_table_size, req);
	} else {
		desc = current_desc;
	}

	assert(req->iovcnt == 0);
	/* Map descs to IOVs */
	new_idx = last_avail_idx;
	while (1) {
		assert(desc != NULL);
		if (spdk_unlikely(req->iovcnt == VIRTIO_DEV_MAX_IOVS)) {
			SPDK_ERRLOG("Max IOVs in request reached (iovcnt = %d).\n", req->iovcnt);
			return -EINVAL;
		}

		if (spdk_unlikely(!virtio_vring_packed_desc_to_iov(dev, desc, virtio_req_to_sg_t(req, req->iovcnt),
				  &req->iovs[req->iovcnt]))) {
			SPDK_ERRLOG("Map Desc to IOV failed (iovcnt = %d).\n", req->iovcnt);
			return -EINVAL;
		}
		req->desc_writeable[req->iovcnt] = false;
		if (virtio_vring_packed_desc_is_wr(desc)) {
			req->desc_writeable[req->iovcnt] = true;
		}

		req->iovcnt++;
		len += desc->len;

		/* get next desc */
		if (desc_table) {
			/* indirect chain: iterate the mapped table */
			if (req->iovcnt < desc_table_size) {
				desc = &desc_table[req->iovcnt];
			} else {
				desc = NULL;
			}
		} else {
			/* in-ring chain: follow NEXT flags through the ring */
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				req->buffer_id = desc->id;
				desc = NULL;
			} else {
				new_idx = (new_idx + 1) % vq->qsize;
				desc = &vq->desc.desc_packed[new_idx];
				num_descs++;
				req->buffer_id = desc->id;
			}
		}

		if (desc == NULL) {
			break;
		}
	}

	req->num_descs = num_descs;
	vq->last_avail_idx = (new_idx + 1) % vq->qsize;
	if (vq->last_avail_idx < last_avail_idx) {
		/* index wrapped: toggle the driver ring wrap counter */
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	req->payload_size = len;

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: req %p, iovcnt %u, num_descs %u\n",
		      dev->name, req, req->iovcnt, num_descs);
	return 0;
}
851 :
/* Poll one packed virtqueue: consume available descriptor chains (up to
 * VIRTIO_DEV_VRING_MAX_REQS per call), build a request for each and hand
 * it to the backend's exec_request callback.
 * Returns the number of requests submitted, or -EIO when the request pool
 * runs dry.
 */
int
vfu_virtio_dev_process_packed_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vring_packed_desc *desc;
	int ret;
	struct vfu_virtio_req *req;
	uint16_t i, max_reqs;

	max_reqs = VIRTIO_DEV_VRING_MAX_REQS;
	for (i = 0; i < max_reqs; i++) {
		desc = &vq->desc.desc_packed[vq->last_avail_idx];
		if (!virtio_vring_packed_is_avail(desc, vq->packed.avail_phase)) {
			/* ring drained */
			return i;
		}

		req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
		if (spdk_unlikely(!req)) {
			SPDK_ERRLOG("Error to get request\n");
			/* TODO: address the error case */
			assert(false);
			return -EIO;
		}

		ret = virtio_dev_packed_iovs_setup(dev, vq, vq->last_avail_idx, desc, req);
		if (spdk_unlikely(ret)) {
			/* let the device to response the error: the request is
			 * still submitted below so the backend can fail it.
			 */
			SPDK_ERRLOG("virtio_dev_packed_iovs_setup failed\n");
		}

		assert(virtio_endpoint->virtio_ops.exec_request);
		virtio_endpoint->io_outstanding++;
		virtio_endpoint->virtio_ops.exec_request(virtio_endpoint, vq, req);
	}

	return i;
}
889 :
890 : struct vfu_virtio_req *
891 0 : virito_dev_packed_ring_get_next_avail_req(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
892 : {
893 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
894 0 : struct vring_packed_desc *desc;
895 0 : int ret;
896 0 : struct vfu_virtio_req *req;
897 :
898 0 : desc = &vq->desc.desc_packed[vq->last_avail_idx];
899 0 : if (!virtio_vring_packed_is_avail(desc, vq->packed.avail_phase)) {
900 0 : return NULL;
901 : }
902 :
903 0 : SPDK_DEBUGLOG(vfu_virtio_io, "%s: get 1 descriptors\n", dev->name);
904 :
905 0 : req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
906 0 : if (!req) {
907 0 : SPDK_ERRLOG("Error to get request\n");
908 0 : return NULL;
909 : }
910 :
911 0 : ret = virtio_dev_packed_iovs_setup(dev, vq, vq->last_avail_idx, desc, req);
912 0 : if (ret) {
913 0 : SPDK_ERRLOG("virtio_dev_packed_iovs_setup failed\n");
914 0 : vfu_virtio_dev_put_req(req);
915 0 : return NULL;
916 : }
917 :
918 0 : return req;
919 0 : }
920 :
921 : static int
922 0 : virtio_vfu_pci_common_cfg(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
923 : size_t count, loff_t pos, bool is_write)
924 : {
925 0 : struct vfu_virtio_dev *dev = virtio_endpoint->dev;
926 0 : uint32_t offset, value = 0;
927 0 : int ret;
928 :
929 0 : assert(count <= 4);
930 0 : offset = pos - VIRTIO_PCI_COMMON_CFG_OFFSET;
931 :
932 0 : if (is_write) {
933 0 : memcpy(&value, buf, count);
934 0 : switch (offset) {
935 : case VIRTIO_PCI_COMMON_DFSELECT:
936 0 : dev->cfg.host_feature_select = value;
937 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_DFSELECT with 0x%x\n",
938 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
939 : value);
940 0 : break;
941 : case VIRTIO_PCI_COMMON_GFSELECT:
942 0 : dev->cfg.guest_feature_select = value;
943 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GFSELECT with 0x%x\n",
944 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
945 : value);
946 0 : break;
947 : case VIRTIO_PCI_COMMON_GF:
948 0 : assert(dev->cfg.guest_feature_select <= 1);
949 0 : if (dev->cfg.guest_feature_select) {
950 0 : dev->cfg.guest_feat_hi = value;
951 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GF_HI with 0x%x\n",
952 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
953 : value);
954 0 : } else {
955 0 : dev->cfg.guest_feat_lo = value;
956 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GF_LO with 0x%x\n",
957 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
958 : value);
959 : }
960 :
961 0 : ret = virtio_dev_set_features(dev,
962 0 : (((uint64_t)dev->cfg.guest_feat_hi << 32) | dev->cfg.guest_feat_lo));
963 0 : if (ret) {
964 0 : return ret;
965 : }
966 0 : break;
967 : case VIRTIO_PCI_COMMON_MSIX:
968 0 : dev->cfg.msix_config = value;
969 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_MSIX with 0x%x\n",
970 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
971 : value);
972 0 : break;
973 : case VIRTIO_PCI_COMMON_STATUS:
974 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_STATUS with 0x%x\n",
975 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
976 : value);
977 0 : ret = virtio_dev_set_status(dev, value);
978 0 : if (ret) {
979 0 : return ret;
980 : }
981 0 : break;
982 : case VIRTIO_PCI_COMMON_Q_SELECT:
983 0 : if (value < VIRTIO_DEV_MAX_VQS) {
984 0 : dev->cfg.queue_select = value;
985 0 : }
986 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_SELECT with 0x%x\n",
987 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
988 : value);
989 0 : break;
990 : case VIRTIO_PCI_COMMON_Q_SIZE:
991 0 : dev->vqs[dev->cfg.queue_select].qsize = value;
992 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_SIZE with 0x%x\n",
993 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
994 : value);
995 0 : break;
996 : case VIRTIO_PCI_COMMON_Q_MSIX:
997 0 : dev->vqs[dev->cfg.queue_select].vector = value;
998 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_MSIX with 0x%x\n",
999 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1000 : value);
1001 0 : break;
1002 : case VIRTIO_PCI_COMMON_Q_ENABLE:
1003 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_ENABLE with 0x%x\n",
1004 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1005 : value);
1006 0 : if (value == 1) {
1007 0 : ret = virtio_dev_enable_vq(dev, dev->cfg.queue_select);
1008 0 : if (ret) {
1009 0 : return ret;
1010 : }
1011 0 : } else {
1012 0 : ret = virtio_dev_disable_vq(dev, dev->cfg.queue_select);
1013 0 : if (ret) {
1014 0 : return ret;
1015 : }
1016 : }
1017 0 : break;
1018 : case VIRTIO_PCI_COMMON_Q_DESCLO:
1019 0 : dev->vqs[dev->cfg.queue_select].desc_lo = value;
1020 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_DESCLO with 0x%x\n",
1021 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1022 0 : break;
1023 : case VIRTIO_PCI_COMMON_Q_DESCHI:
1024 0 : dev->vqs[dev->cfg.queue_select].desc_hi = value;
1025 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_DESCHI with 0x%x\n",
1026 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1027 0 : break;
1028 : case VIRTIO_PCI_COMMON_Q_AVAILLO:
1029 0 : dev->vqs[dev->cfg.queue_select].avail_lo = value;
1030 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_AVAILLO with 0x%x\n",
1031 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1032 0 : break;
1033 : case VIRTIO_PCI_COMMON_Q_AVAILHI:
1034 0 : dev->vqs[dev->cfg.queue_select].avail_hi = value;
1035 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_AVAILHI with 0x%x\n",
1036 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1037 0 : break;
1038 : case VIRTIO_PCI_COMMON_Q_USEDLO:
1039 0 : dev->vqs[dev->cfg.queue_select].used_lo = value;
1040 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_USEDLO with 0x%x\n",
1041 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1042 0 : break;
1043 : case VIRTIO_PCI_COMMON_Q_USEDHI:
1044 0 : dev->vqs[dev->cfg.queue_select].used_hi = value;
1045 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_USEDHI with 0x%x\n",
1046 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1047 0 : break;
1048 :
1049 : default:
1050 0 : SPDK_ERRLOG("%s: WRITE UNSUPPORTED offset 0x%x\n",
1051 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), offset);
1052 0 : errno = EIO;
1053 0 : return -1;
1054 : }
1055 0 : } else {
1056 0 : switch (offset) {
1057 : case VIRTIO_PCI_COMMON_DFSELECT:
1058 0 : value = dev->cfg.host_feature_select;
1059 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DFSELECT with 0x%x\n",
1060 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1061 : value);
1062 0 : break;
1063 : case VIRTIO_PCI_COMMON_DF:
1064 0 : assert(dev->cfg.host_feature_select <= 1);
1065 0 : if (dev->cfg.host_feature_select) {
1066 0 : value = dev->host_features >> 32;
1067 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DF_HI with 0x%x\n",
1068 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1069 : value);
1070 0 : } else {
1071 0 : value = dev->host_features;
1072 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DF_LO with 0x%x\n",
1073 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1074 : value);
1075 : }
1076 0 : break;
1077 : case VIRTIO_PCI_COMMON_GFSELECT:
1078 0 : value = dev->cfg.guest_feature_select;
1079 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GFSELECT with 0x%x\n",
1080 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1081 : value);
1082 0 : break;
1083 : case VIRTIO_PCI_COMMON_GF:
1084 0 : assert(dev->cfg.guest_feature_select <= 1);
1085 0 : if (dev->cfg.guest_feature_select) {
1086 0 : value = dev->cfg.guest_feat_hi;
1087 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GF_HI with 0x%x\n",
1088 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1089 : value);
1090 0 : } else {
1091 0 : value = dev->cfg.guest_feat_lo;
1092 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GF_LO with 0x%x\n",
1093 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1094 : value);
1095 : }
1096 0 : break;
1097 : case VIRTIO_PCI_COMMON_MSIX:
1098 0 : value = dev->cfg.msix_config;
1099 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_MSIX with 0x%x\n",
1100 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1101 : value);
1102 0 : break;
1103 : case VIRTIO_PCI_COMMON_NUMQ:
1104 0 : value = dev->num_queues;
1105 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_NUMQ with 0x%x\n",
1106 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1107 : value);
1108 0 : break;
1109 : case VIRTIO_PCI_COMMON_STATUS:
1110 0 : value = dev->cfg.device_status;
1111 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_STATUS with 0x%x\n",
1112 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1113 : value);
1114 0 : break;
1115 : case VIRTIO_PCI_COMMON_CFGGENERATION:
1116 0 : value = dev->cfg.config_generation;
1117 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_CFGGENERATION with 0x%x\n",
1118 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1119 : value);
1120 0 : break;
1121 : case VIRTIO_PCI_COMMON_Q_NOFF:
1122 0 : value = dev->cfg.queue_select;
1123 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_Q_NOFF with 0x%x\n",
1124 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1125 : value);
1126 0 : break;
1127 : case VIRTIO_PCI_COMMON_Q_SELECT:
1128 0 : value = dev->cfg.queue_select;
1129 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_Q_SELECT with 0x%x\n",
1130 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1131 : value);
1132 0 : break;
1133 : case VIRTIO_PCI_COMMON_Q_SIZE:
1134 0 : value = dev->vqs[dev->cfg.queue_select].qsize;
1135 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_SIZE with 0x%x\n",
1136 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1137 : dev->cfg.queue_select, value);
1138 0 : break;
1139 : case VIRTIO_PCI_COMMON_Q_MSIX:
1140 0 : value = dev->vqs[dev->cfg.queue_select].vector;
1141 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_MSIX with 0x%x\n",
1142 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1143 : dev->cfg.queue_select, value);
1144 0 : break;
1145 : case VIRTIO_PCI_COMMON_Q_ENABLE:
1146 0 : value = dev->vqs[dev->cfg.queue_select].enabled;
1147 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_ENABLE with 0x%x\n",
1148 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1149 0 : break;
1150 : case VIRTIO_PCI_COMMON_Q_DESCLO:
1151 0 : value = dev->vqs[dev->cfg.queue_select].desc_lo;
1152 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_DESCLO with 0x%x\n",
1153 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1154 0 : break;
1155 : case VIRTIO_PCI_COMMON_Q_DESCHI:
1156 0 : value = dev->vqs[dev->cfg.queue_select].desc_hi;
1157 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_DESCHI with 0x%x\n",
1158 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1159 0 : break;
1160 : case VIRTIO_PCI_COMMON_Q_AVAILLO:
1161 0 : value = dev->vqs[dev->cfg.queue_select].avail_lo;
1162 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_AVAILLO with 0x%x\n",
1163 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1164 0 : break;
1165 : case VIRTIO_PCI_COMMON_Q_AVAILHI:
1166 0 : value = dev->vqs[dev->cfg.queue_select].avail_hi;
1167 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_AVAILHI with 0x%x\n",
1168 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1169 0 : break;
1170 : case VIRTIO_PCI_COMMON_Q_USEDLO:
1171 0 : value = dev->vqs[dev->cfg.queue_select].used_lo;
1172 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_USEDLO with 0x%x\n",
1173 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1174 0 : break;
1175 : case VIRTIO_PCI_COMMON_Q_USEDHI:
1176 0 : value = dev->vqs[dev->cfg.queue_select].used_hi;
1177 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_USEDHI with 0x%x\n",
1178 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
1179 0 : break;
1180 : default:
1181 0 : SPDK_ERRLOG("%s: READ UNSUPPORTED offset 0x%x\n",
1182 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), offset);
1183 0 : errno = EIO;
1184 0 : return -1;
1185 : }
1186 0 : memcpy(buf, &value, count);
1187 : }
1188 :
1189 0 : return count;
1190 0 : }
1191 :
1192 : static int
1193 0 : virtio_vfu_device_specific_cfg(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
1194 : size_t count, loff_t pos, bool is_write)
1195 : {
1196 0 : loff_t offset;
1197 0 : int ret = -1;
1198 :
1199 0 : assert(count <= 8);
1200 0 : offset = pos - VIRTIO_PCI_SPECIFIC_CFG_OFFSET;
1201 0 : if (!is_write) {
1202 0 : if (virtio_endpoint->virtio_ops.get_config) {
1203 0 : ret = virtio_endpoint->virtio_ops.get_config(virtio_endpoint, buf, offset, count);
1204 0 : }
1205 0 : } else {
1206 0 : if (virtio_endpoint->virtio_ops.set_config) {
1207 0 : ret = virtio_endpoint->virtio_ops.set_config(virtio_endpoint, buf, offset, count);
1208 0 : }
1209 : }
1210 :
1211 0 : if (ret < 0) {
1212 0 : return ret;
1213 : }
1214 :
1215 0 : return count;
1216 0 : }
1217 :
1218 : static int
1219 0 : virtio_vfu_pci_isr(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
1220 : size_t count, bool is_write)
1221 : {
1222 0 : uint8_t *isr;
1223 :
1224 0 : if (count != 1) {
1225 0 : SPDK_ERRLOG("ISR register is 1 byte\n");
1226 0 : errno = EIO;
1227 0 : return -1;
1228 : }
1229 :
1230 0 : isr = buf;
1231 :
1232 0 : if (!is_write) {
1233 0 : SPDK_DEBUGLOG(vfu_virtio, "READ PCI ISR\n");
1234 : /* Read-Acknowledge Clear */
1235 0 : *isr = virtio_endpoint->dev->cfg.isr;
1236 0 : virtio_endpoint->dev->cfg.isr = 0;
1237 0 : } else {
1238 0 : SPDK_ERRLOG("ISR register is RO\n");
1239 0 : errno = EIO;
1240 0 : return -1;
1241 : }
1242 :
1243 0 : return count;
1244 0 : }
1245 :
1246 : static ssize_t
1247 0 : virtio_vfu_access_bar4(vfu_ctx_t *vfu_ctx, char *buf, size_t count,
1248 : loff_t pos,
1249 : bool is_write)
1250 : {
1251 0 : struct spdk_vfu_endpoint *endpoint = vfu_get_private(vfu_ctx);
1252 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1253 0 : uint64_t start, end;
1254 :
1255 0 : start = pos;
1256 0 : end = start + count;
1257 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: %s bar4 0x%"PRIX64"-0x%"PRIX64", len = %lu\n",
1258 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1259 : is_write ? "write" : "read", start, end - 1, count);
1260 :
1261 0 : if (end < VIRTIO_PCI_COMMON_CFG_OFFSET + VIRTIO_PCI_COMMON_CFG_LENGTH) {
1262 : /* virtio PCI common configuration */
1263 0 : return virtio_vfu_pci_common_cfg(virtio_endpoint, buf, count, pos, is_write);
1264 0 : } else if (start >= VIRTIO_PCI_ISR_ACCESS_OFFSET &&
1265 0 : end < VIRTIO_PCI_ISR_ACCESS_OFFSET + VIRTIO_PCI_ISR_ACCESS_LENGTH) {
1266 : /* ISR access */
1267 0 : return virtio_vfu_pci_isr(virtio_endpoint, buf, count, is_write);
1268 0 : } else if (start >= VIRTIO_PCI_SPECIFIC_CFG_OFFSET &&
1269 0 : end < VIRTIO_PCI_SPECIFIC_CFG_OFFSET + VIRTIO_PCI_SPECIFIC_CFG_LENGTH) {
1270 : /* Device specific configuration */
1271 0 : return virtio_vfu_device_specific_cfg(virtio_endpoint, buf, count, pos, is_write);
1272 0 : } else if (start >= VIRTIO_PCI_NOTIFICATIONS_OFFSET &&
1273 0 : end < VIRTIO_PCI_NOTIFICATIONS_OFFSET + VIRTIO_PCI_NOTIFICATIONS_LENGTH) {
1274 : /* Notifications */
1275 : /* Sparse mmap region by default, there are no MMIO R/W messages */
1276 0 : assert(false);
1277 : return count;
1278 : } else {
1279 0 : assert(false);
1280 : }
1281 :
1282 : return 0;
1283 0 : }
1284 :
1285 : int
1286 0 : vfu_virtio_post_memory_add(struct spdk_vfu_endpoint *endpoint, void *map_start, void *map_end)
1287 : {
1288 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1289 0 : struct vfu_virtio_dev *dev = virtio_endpoint->dev;
1290 0 : uint32_t i;
1291 :
1292 0 : if (!dev) {
1293 0 : return 0;
1294 : }
1295 :
1296 0 : for (i = 0; i < dev->num_queues; i++) {
1297 : /* Try to remap VQs if necessary */
1298 0 : virtio_dev_map_vq(dev, &dev->vqs[i]);
1299 0 : }
1300 :
1301 0 : return 0;
1302 0 : }
1303 :
1304 : int
1305 0 : vfu_virtio_pre_memory_remove(struct spdk_vfu_endpoint *endpoint, void *map_start, void *map_end)
1306 : {
1307 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1308 :
1309 0 : if (virtio_endpoint->dev != NULL) {
1310 0 : vfu_virtio_dev_unmap_vqs(virtio_endpoint->dev, map_start, map_end);
1311 0 : }
1312 :
1313 0 : return 0;
1314 0 : }
1315 :
1316 : int
1317 0 : vfu_virtio_pci_reset_cb(struct spdk_vfu_endpoint *endpoint)
1318 : {
1319 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1320 :
1321 0 : if (virtio_endpoint->dev) {
1322 0 : vfu_virtio_dev_stop(virtio_endpoint->dev);
1323 0 : vfu_virtio_dev_reset(virtio_endpoint->dev);
1324 0 : }
1325 :
1326 0 : return 0;
1327 0 : }
1328 :
1329 : static ssize_t
1330 0 : access_pci_config(vfu_ctx_t *vfu_ctx, char *buf, size_t count, loff_t offset,
1331 : bool is_write)
1332 : {
1333 0 : struct spdk_vfu_endpoint *endpoint = vfu_get_private(vfu_ctx);
1334 0 : void *pci_config = spdk_vfu_endpoint_get_pci_config(endpoint);
1335 :
1336 0 : SPDK_DEBUGLOG(vfu_virtio,
1337 : "%s: PCI_CFG %s %#lx-%#lx\n",
1338 : spdk_vfu_get_endpoint_id(endpoint), is_write ? "write" : "read",
1339 : offset, offset + count);
1340 :
1341 0 : if (is_write) {
1342 0 : SPDK_ERRLOG("write %#lx-%#lx not supported\n",
1343 : offset, offset + count);
1344 0 : errno = EINVAL;
1345 0 : return -1;
1346 : }
1347 :
1348 0 : if (offset + count > 0x1000) {
1349 0 : SPDK_ERRLOG("access past end of extended PCI configuration space, want=%ld+%ld, max=%d\n",
1350 : offset, count, 0x1000);
1351 0 : errno = ERANGE;
1352 0 : return -1;
1353 : }
1354 :
1355 0 : memcpy(buf, ((unsigned char *)pci_config) + offset, count);
1356 0 : return count;
1357 0 : }
1358 :
1359 : static int
1360 0 : vfu_virtio_dev_start(struct vfu_virtio_dev *dev)
1361 : {
1362 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
1363 0 : int ret = 0;
1364 :
1365 0 : SPDK_DEBUGLOG(vfu_virtio, "start %s\n", dev->name);
1366 :
1367 0 : if (virtio_dev_is_started(dev)) {
1368 0 : SPDK_ERRLOG("Device %s is already started\n", dev->name);
1369 0 : return -EFAULT;
1370 : }
1371 :
1372 0 : if (virtio_endpoint->virtio_ops.start_device) {
1373 0 : virtio_endpoint->io_outstanding = 0;
1374 0 : ret = virtio_endpoint->virtio_ops.start_device(virtio_endpoint);
1375 0 : }
1376 :
1377 0 : SPDK_DEBUGLOG(vfu_virtio, "%s is started with ret %d\n", dev->name, ret);
1378 :
1379 0 : return ret;
1380 0 : }
1381 :
1382 : static int
1383 0 : vfu_virtio_dev_stop(struct vfu_virtio_dev *dev)
1384 : {
1385 0 : struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
1386 0 : int ret = 0;
1387 :
1388 0 : SPDK_DEBUGLOG(vfu_virtio, "stop %s\n", dev->name);
1389 :
1390 0 : if (!virtio_dev_is_started(dev)) {
1391 0 : SPDK_DEBUGLOG(vfu_virtio, "%s isn't started\n", dev->name);
1392 0 : return 0;
1393 : }
1394 :
1395 0 : if (virtio_endpoint->virtio_ops.stop_device) {
1396 0 : ret = virtio_endpoint->virtio_ops.stop_device(virtio_endpoint);
1397 0 : assert(ret == 0);
1398 0 : }
1399 :
1400 : /* Unmap all VQs */
1401 0 : vfu_virtio_dev_unmap_vqs(dev, NULL, NULL);
1402 :
1403 0 : return ret;
1404 0 : }
1405 :
1406 : int
1407 0 : vfu_virtio_detach_device(struct spdk_vfu_endpoint *endpoint)
1408 : {
1409 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1410 0 : struct vfu_virtio_dev *dev = virtio_endpoint->dev;
1411 :
1412 0 : if (virtio_endpoint->dev == NULL) {
1413 0 : return 0;
1414 : }
1415 :
1416 0 : SPDK_DEBUGLOG(vfu_virtio, "detach device %s\n", dev->name);
1417 :
1418 0 : vfu_virtio_dev_stop(dev);
1419 0 : vfu_virtio_dev_free_reqs(virtio_endpoint, dev);
1420 0 : virtio_endpoint->dev = NULL;
1421 0 : free(dev);
1422 :
1423 0 : return 0;
1424 0 : }
1425 :
1426 : int
1427 0 : vfu_virtio_attach_device(struct spdk_vfu_endpoint *endpoint)
1428 : {
1429 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1430 0 : uint64_t supported_features = 0;
1431 0 : struct vfu_virtio_dev *dev;
1432 0 : struct vfu_virtio_vq *vq;
1433 0 : struct vfu_virtio_req *req;
1434 0 : uint32_t i, j;
1435 0 : int ret = 0;
1436 :
1437 0 : dev = calloc(1, sizeof(*dev) + virtio_endpoint->num_queues * 3 * dma_sg_size());
1438 0 : if (dev == NULL) {
1439 0 : return -ENOMEM;
1440 : }
1441 :
1442 0 : dev->num_queues = virtio_endpoint->num_queues;
1443 0 : for (i = 0; i < dev->num_queues; i++) {
1444 0 : vq = &dev->vqs[i];
1445 0 : vq->id = i;
1446 0 : vq->qsize = virtio_endpoint->qsize;
1447 0 : vq->avail.sg = (dma_sg_t *)(dev->sg + i * dma_sg_size() * 3);
1448 0 : vq->used.sg = (dma_sg_t *)((uint8_t *)vq->avail.sg + dma_sg_size());
1449 0 : vq->desc.sg = (dma_sg_t *)((uint8_t *)vq->used.sg + dma_sg_size());
1450 :
1451 0 : STAILQ_INIT(&vq->free_reqs);
1452 0 : for (j = 0; j <= vq->qsize; j++) {
1453 0 : req = vfu_virtio_vq_alloc_req(virtio_endpoint, vq);
1454 0 : if (!req) {
1455 0 : SPDK_ERRLOG("Error to allocate req\n");
1456 0 : ret = -ENOMEM;
1457 0 : goto out;
1458 : }
1459 0 : req->indirect_iov = &req->iovs[VIRTIO_DEV_MAX_IOVS];
1460 0 : req->indirect_sg = virtio_req_to_sg_t(req, VIRTIO_DEV_MAX_IOVS);
1461 0 : req->dev = dev;
1462 0 : req->vq = vq;
1463 0 : STAILQ_INSERT_TAIL(&vq->free_reqs, req, link);
1464 0 : }
1465 0 : }
1466 :
1467 0 : if (virtio_endpoint->virtio_ops.get_device_features) {
1468 0 : supported_features = virtio_endpoint->virtio_ops.get_device_features(virtio_endpoint);
1469 0 : }
1470 0 : dev->host_features = supported_features;
1471 :
1472 0 : snprintf(dev->name, SPDK_VFU_MAX_NAME_LEN, "%s",
1473 0 : spdk_vfu_get_endpoint_name(virtio_endpoint->endpoint));
1474 0 : virtio_endpoint->dev = dev;
1475 0 : dev->virtio_endpoint = virtio_endpoint;
1476 0 : virtio_endpoint->thread = spdk_get_thread();
1477 0 : return 0;
1478 :
1479 : out:
1480 0 : vfu_virtio_dev_free_reqs(virtio_endpoint, dev);
1481 0 : return ret;
1482 0 : }
1483 :
1484 : int
1485 0 : vfu_virtio_endpoint_setup(struct vfu_virtio_endpoint *virtio_endpoint,
1486 : struct spdk_vfu_endpoint *endpoint,
1487 : char *basename, const char *endpoint_name,
1488 : struct vfu_virtio_ops *ops)
1489 : {
1490 0 : char path[PATH_MAX] = "";
1491 0 : int ret;
1492 :
1493 0 : if (!ops) {
1494 0 : return -EINVAL;
1495 : }
1496 :
1497 0 : ret = snprintf(path, PATH_MAX, "%s%s_bar4", basename, endpoint_name);
1498 0 : if (ret < 0 || ret >= PATH_MAX) {
1499 0 : SPDK_ERRLOG("%s: error to get socket path: %s.\n", basename, spdk_strerror(errno));
1500 0 : return -EINVAL;
1501 : }
1502 :
1503 0 : ret = open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
1504 0 : if (ret == -1) {
1505 0 : SPDK_ERRLOG("%s: failed to open device memory at %s.\n",
1506 : path, spdk_strerror(errno));
1507 0 : return ret;
1508 : }
1509 0 : unlink(path);
1510 :
1511 0 : virtio_endpoint->devmem_fd = ret;
1512 0 : ret = ftruncate(virtio_endpoint->devmem_fd, VIRTIO_PCI_BAR4_LENGTH);
1513 0 : if (ret != 0) {
1514 0 : SPDK_ERRLOG("%s: error to ftruncate file %s.\n", path,
1515 : spdk_strerror(errno));
1516 0 : close(virtio_endpoint->devmem_fd);
1517 0 : return ret;
1518 : }
1519 :
1520 0 : virtio_endpoint->doorbells = mmap(NULL, VIRTIO_PCI_NOTIFICATIONS_LENGTH, PROT_READ | PROT_WRITE,
1521 : MAP_SHARED,
1522 0 : virtio_endpoint->devmem_fd, VIRTIO_PCI_NOTIFICATIONS_OFFSET);
1523 0 : if (virtio_endpoint->doorbells == MAP_FAILED) {
1524 0 : SPDK_ERRLOG("%s: error to mmap file %s.\n", path, spdk_strerror(errno));
1525 0 : close(virtio_endpoint->devmem_fd);
1526 0 : return -EFAULT;
1527 : }
1528 0 : virtio_endpoint->endpoint = endpoint;
1529 0 : virtio_endpoint->virtio_ops = *ops;
1530 0 : virtio_endpoint->num_queues = VIRTIO_DEV_MAX_VQS;
1531 0 : virtio_endpoint->qsize = VIRTIO_VQ_DEFAULT_SIZE;
1532 :
1533 0 : SPDK_DEBUGLOG(vfu_virtio, "mmap file %s, devmem_fd %d\n", path, virtio_endpoint->devmem_fd);
1534 0 : return 0;
1535 0 : }
1536 :
1537 : int
1538 0 : vfu_virtio_endpoint_destruct(struct vfu_virtio_endpoint *virtio_endpoint)
1539 : {
1540 0 : if (virtio_endpoint->doorbells) {
1541 0 : munmap((void *)virtio_endpoint->doorbells, VIRTIO_PCI_NOTIFICATIONS_LENGTH);
1542 0 : }
1543 :
1544 0 : if (virtio_endpoint->devmem_fd) {
1545 0 : close(virtio_endpoint->devmem_fd);
1546 0 : }
1547 :
1548 0 : return 0;
1549 : }
1550 :
1551 : static int
1552 0 : vfu_virtio_quiesce_poll(void *ctx)
1553 : {
1554 0 : struct vfu_virtio_endpoint *virtio_endpoint = ctx;
1555 0 : vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);
1556 :
1557 0 : if (virtio_endpoint->io_outstanding) {
1558 0 : return SPDK_POLLER_IDLE;
1559 : }
1560 :
1561 0 : spdk_poller_unregister(&virtio_endpoint->quiesce_poller);
1562 0 : virtio_endpoint->quiesce_in_progress = false;
1563 0 : vfu_device_quiesced(vfu_ctx, 0);
1564 :
1565 0 : return SPDK_POLLER_BUSY;
1566 0 : }
1567 :
1568 : int
1569 0 : vfu_virtio_quiesce_cb(struct spdk_vfu_endpoint *endpoint)
1570 : {
1571 0 : struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
1572 :
1573 0 : if (virtio_endpoint->quiesce_in_progress) {
1574 0 : return -EBUSY;
1575 : }
1576 :
1577 0 : if (!virtio_endpoint->io_outstanding) {
1578 0 : return 0;
1579 : }
1580 :
1581 0 : virtio_endpoint->quiesce_in_progress = true;
1582 0 : virtio_endpoint->quiesce_poller = SPDK_POLLER_REGISTER(vfu_virtio_quiesce_poll, virtio_endpoint,
1583 : 10);
1584 :
1585 0 : return -EBUSY;
1586 0 : }
1587 :
/* Static template of the emulated virtio PCI device presented over vfio-user.
 * vfu_virtio_get_device_info() copies this into the caller's buffer and then
 * patches in the per-endpoint BAR4 backing fd.
 */
static struct spdk_vfu_pci_device vfu_virtio_device_info = {
	.id = {
		.vid = SPDK_PCI_VID_VIRTIO,
		/* Realize when calling get device information */
		.did = 0x0,
		.ssvid = SPDK_PCI_VID_VIRTIO,
		.ssid = 0x0,
	},

	.class = {
		/* 0x01, mass storage controller */
		.bcc = 0x01,
		/* 0x00, SCSI controller */
		.scc = 0x00,
		/* 0x00, SCSI controller - vendor specific interface */
		.pi = 0x00,
	},

	/* Power management capability; nsfrst = no soft reset on D3hot->D0. */
	.pmcap = {
		.hdr.id = PCI_CAP_ID_PM,
		.pmcs.nsfrst = 0x1,
	},

	/* PCI Express capability: FLR supported (flrc), role-based error
	 * reporting (rer). */
	.pxcap = {
		.hdr.id = PCI_CAP_ID_EXP,
		.pxcaps.ver = 0x2,
		.pxdcap = {.rer = 0x1, .flrc = 0x1},
		.pxdcap2.ctds = 0x1,
	},

	/* MSI-X: table in BAR1, PBA in BAR2; one vector per VQ
	 * (table size field is N-1 encoded). */
	.msixcap = {
		.hdr.id = PCI_CAP_ID_MSIX,
		.mxc.ts = VIRTIO_DEV_MAX_VQS - 1,
		.mtab = {.tbir = 0x1, .to = 0x0},
		.mpba = {.pbir = 0x2, .pbao = 0x0},
	},

	/* Four virtio vendor capabilities: common/ISR/device/notify
	 * (see vfu_virtio_get_vendor_capability()). */
	.nr_vendor_caps = 4,

	.intr_ipin = 0x1,
	.nr_int_irqs = 0x1,
	.nr_msix_irqs = VIRTIO_DEV_MAX_VQS,

	.regions = {
		/* BAR0 */
		{0},
		/* BAR1 */
		{
			.access_cb = NULL,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
		/* BAR2 */
		{
			.access_cb = NULL,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
		/* BAR3 */
		{0},
		/* BAR4: virtio config/ISR/device/notification regions; the
		 * notification window is exposed via sparse mmap so doorbell
		 * writes bypass MMIO emulation. fd is filled per endpoint. */
		{
			.access_cb = virtio_vfu_access_bar4,
			.offset = 0,
			.fd = -1,
			.len = VIRTIO_PCI_BAR4_LENGTH,
			.flags = VFU_REGION_FLAG_RW | VFU_REGION_FLAG_MEM,
			.nr_sparse_mmaps = 1,
			.mmaps = {
				{
					.offset = VIRTIO_PCI_NOTIFICATIONS_OFFSET,
					.len = VIRTIO_PCI_NOTIFICATIONS_LENGTH,
				},
			},
		},
		/* BAR5 */
		{0},
		/* BAR6 */
		{0},
		/* ROM */
		{0},
		/* PCI Config */
		{
			.access_cb = access_pci_config,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
	},
};
1686 :
1687 : void
1688 0 : vfu_virtio_get_device_info(struct vfu_virtio_endpoint *virtio_endpoint,
1689 : struct spdk_vfu_pci_device *device_info)
1690 : {
1691 0 : memcpy(device_info, &vfu_virtio_device_info, sizeof(*device_info));
1692 :
1693 : /* BAR4 Region FD */
1694 0 : device_info->regions[VFU_PCI_DEV_BAR4_REGION_IDX].fd = virtio_endpoint->devmem_fd;
1695 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: get device information, fd %d\n",
1696 : spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
1697 : virtio_endpoint->devmem_fd);
1698 0 : }
1699 :
/* Virtio vendor-specific PCI capabilities (virtio spec §4.1.4). Each one
 * points the guest driver at the corresponding window inside BAR4. They are
 * copied into PCI config space by vfu_virtio_get_vendor_capability().
 */

/* Common configuration structure (device/driver features, queue setup). */
static struct virtio_pci_cap common_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(common_cap),
	.cfg_type = VIRTIO_PCI_CAP_COMMON_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_COMMON_CFG_OFFSET,
	.length = VIRTIO_PCI_COMMON_CFG_LENGTH,
};

/* ISR status byte (read-to-acknowledge, see virtio_vfu_pci_isr()). */
static struct virtio_pci_cap isr_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(isr_cap),
	.cfg_type = VIRTIO_PCI_CAP_ISR_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_ISR_ACCESS_OFFSET,
	.length = VIRTIO_PCI_ISR_ACCESS_LENGTH,
};

/* Device-specific configuration, served by the backend's get/set_config. */
static struct virtio_pci_cap dev_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(dev_cap),
	.cfg_type = VIRTIO_PCI_CAP_DEVICE_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_SPECIFIC_CFG_OFFSET,
	.length = VIRTIO_PCI_SPECIFIC_CFG_LENGTH,
};

/* Notification (doorbell) window; each queue's doorbell is at
 * queue_notify_off * notify_off_multiplier within the window. */
static struct virtio_pci_notify_cap notify_cap = {
	.cap = {
		.cap_vndr = PCI_CAP_ID_VNDR,
		.cap_len = sizeof(notify_cap),
		.cfg_type = VIRTIO_PCI_CAP_NOTIFY_CFG,
		.bar = 4,
		.offset = VIRTIO_PCI_NOTIFICATIONS_OFFSET,
		.length = VIRTIO_PCI_NOTIFICATIONS_LENGTH,
	},
	.notify_off_multiplier = 4,
};
1738 :
1739 : uint16_t
1740 0 : vfu_virtio_get_vendor_capability(struct spdk_vfu_endpoint *endpoint, char *buf,
1741 : uint16_t buf_len,
1742 : uint16_t idx)
1743 : {
1744 0 : uint16_t len;
1745 :
1746 0 : SPDK_DEBUGLOG(vfu_virtio, "%s: get vendor capability, idx %u\n",
1747 : spdk_vfu_get_endpoint_id(endpoint), idx);
1748 :
1749 0 : switch (idx) {
1750 : case 0:
1751 0 : assert(buf_len > sizeof(common_cap));
1752 0 : memcpy(buf, &common_cap, sizeof(common_cap));
1753 0 : len = sizeof(common_cap);
1754 0 : break;
1755 : case 1:
1756 0 : assert(buf_len > sizeof(isr_cap));
1757 0 : memcpy(buf, &isr_cap, sizeof(isr_cap));
1758 0 : len = sizeof(isr_cap);
1759 0 : break;
1760 : case 2:
1761 0 : assert(buf_len > sizeof(dev_cap));
1762 0 : memcpy(buf, &dev_cap, sizeof(dev_cap));
1763 0 : len = sizeof(dev_cap);
1764 0 : break;
1765 : case 3:
1766 0 : assert(buf_len > sizeof(notify_cap));
1767 0 : memcpy(buf, ¬ify_cap, sizeof(notify_cap));
1768 0 : len = sizeof(notify_cap);
1769 0 : break;
1770 : default:
1771 0 : return 0;
1772 : }
1773 :
1774 0 : return len;
1775 0 : }
1776 :
/* Debug-log flags: "vfu_virtio" covers the control path used throughout this
 * file; "vfu_virtio_io" is a separate flag for the I/O path. */
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio)
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_io)
|