/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "ioat_internal.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"

#include "spdk/log.h"

struct ioat_driver {
        pthread_mutex_t                 lock;
        TAILQ_HEAD(, spdk_ioat_chan)    attached_chans;
};

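/*
 * Process-wide driver state: the mutex serializes spdk_ioat_probe() and
 * spdk_ioat_detach(), and attached_chans tracks every channel attached so far.
 */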
static struct ioat_driver g_ioat_driver = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .attached_chans = TAILQ_HEAD_INITIALIZER(g_ioat_driver.attached_chans),
};

static uint64_t
ioat_get_chansts(struct spdk_ioat_chan *ioat)
{
        return spdk_mmio_read_8(&ioat->regs->chansts);
}

static void
ioat_write_chancmp(struct spdk_ioat_chan *ioat, uint64_t addr)
{
        spdk_mmio_write_8(&ioat->regs->chancmp, addr);
}

static void
ioat_write_chainaddr(struct spdk_ioat_chan *ioat, uint64_t addr)
{
        spdk_mmio_write_8(&ioat->regs->chainaddr, addr);
}

static inline void
ioat_suspend(struct spdk_ioat_chan *ioat)
{
        ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}

static inline void
ioat_reset(struct spdk_ioat_chan *ioat)
{
        ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}

static inline uint32_t
ioat_reset_pending(struct spdk_ioat_chan *ioat)
{
        uint8_t cmd;

        cmd = ioat->regs->chancmd;
        return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}

static int
ioat_map_pci_bar(struct spdk_ioat_chan *ioat)
{
        int regs_bar, rc;
        void *addr;
        uint64_t phys_addr, size;

        regs_bar = 0;
        rc = spdk_pci_device_map_bar(ioat->device, regs_bar, &addr, &phys_addr, &size);
        if (rc != 0 || addr == NULL) {
                SPDK_ERRLOG("spdk_pci_device_map_bar() failed with error code %d\n",
                            rc);
                return -1;
        }

        ioat->regs = (volatile struct spdk_ioat_registers *)addr;

        return 0;
}

static int
ioat_unmap_pci_bar(struct spdk_ioat_chan *ioat)
{
        int rc = 0;
        void *addr = (void *)ioat->regs;

        if (addr) {
                rc = spdk_pci_device_unmap_bar(ioat->device, 0, addr);
        }
        return rc;
}

static inline uint32_t
ioat_get_active(struct spdk_ioat_chan *ioat)
{
        return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}

static inline uint32_t
ioat_get_ring_space(struct spdk_ioat_chan *ioat)
{
        return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}
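
/*
 * head and tail are free-running counters that are masked into the ring by
 * ioat_get_ring_index().  For example, with ring_size_order = 7 (a 128-entry
 * ring), head = 130 and tail = 126 give ioat_get_active() = 4 and
 * ioat_get_ring_space() = 123; one slot is always kept unused so that a full
 * ring can be distinguished from an empty one.
 */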

static uint32_t
ioat_get_ring_index(struct spdk_ioat_chan *ioat, uint32_t index)
{
        return index & ((1 << ioat->ring_size_order) - 1);
}

static void
ioat_get_ring_entry(struct spdk_ioat_chan *ioat, uint32_t index,
                    struct ioat_descriptor **desc,
                    union spdk_ioat_hw_desc **hw_desc)
{
        uint32_t i = ioat_get_ring_index(ioat, index);

        *desc = &ioat->ring[i];
        *hw_desc = &ioat->hw_ring[i];
}

static void
ioat_submit_single(struct spdk_ioat_chan *ioat)
{
        ioat->head++;
}

void
spdk_ioat_flush(struct spdk_ioat_chan *ioat)
{
        uint32_t index = ioat_get_ring_index(ioat, ioat->head - 1);
        union spdk_ioat_hw_desc *hw_desc;

        hw_desc = &ioat->hw_ring[index];
        hw_desc->dma.u.control.completion_update = 1;
        ioat->regs->dmacount = (uint16_t)ioat->head;
}
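
/*
 * spdk_ioat_flush() is what actually notifies the hardware: it requests a
 * completion update on the most recently built descriptor and writes the new
 * head count to DMACOUNT.  A typical batching pattern is therefore (a sketch;
 * chan, ctx, copy_done and the buffers are illustrative, not part of this file):
 *
 *     spdk_ioat_build_copy(chan, &ctx, copy_done, dst1, src1, len1);
 *     spdk_ioat_build_copy(chan, &ctx, copy_done, dst2, src2, len2);
 *     spdk_ioat_flush(chan);
 *
 * spdk_ioat_submit_copy() and spdk_ioat_submit_fill() below are simply a
 * build followed immediately by a flush.
 */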

static struct ioat_descriptor *
ioat_prep_null(struct spdk_ioat_chan *ioat)
{
        struct ioat_descriptor *desc;
        union spdk_ioat_hw_desc *hw_desc;

        if (ioat_get_ring_space(ioat) < 1) {
                return NULL;
        }

        ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

        hw_desc->dma.u.control_raw = 0;
        hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
        hw_desc->dma.u.control.null = 1;

        hw_desc->dma.size = 8;
        hw_desc->dma.src_addr = 0;
        hw_desc->dma.dest_addr = 0;

        desc->callback_fn = NULL;
        desc->callback_arg = NULL;

        ioat_submit_single(ioat);

        return desc;
}

static struct ioat_descriptor *
ioat_prep_copy(struct spdk_ioat_chan *ioat, uint64_t dst,
               uint64_t src, uint32_t len)
{
        struct ioat_descriptor *desc;
        union spdk_ioat_hw_desc *hw_desc;

        assert(len <= ioat->max_xfer_size);

        if (ioat_get_ring_space(ioat) < 1) {
                return NULL;
        }

        ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

        hw_desc->dma.u.control_raw = 0;
        hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;

        hw_desc->dma.size = len;
        hw_desc->dma.src_addr = src;
        hw_desc->dma.dest_addr = dst;

        desc->callback_fn = NULL;
        desc->callback_arg = NULL;

        ioat_submit_single(ioat);

        return desc;
}

static struct ioat_descriptor *
ioat_prep_fill(struct spdk_ioat_chan *ioat, uint64_t dst,
               uint64_t fill_pattern, uint32_t len)
{
        struct ioat_descriptor *desc;
        union spdk_ioat_hw_desc *hw_desc;

        assert(len <= ioat->max_xfer_size);

        if (ioat_get_ring_space(ioat) < 1) {
                return NULL;
        }

        ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);

        hw_desc->fill.u.control_raw = 0;
        hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;

        hw_desc->fill.size = len;
        hw_desc->fill.src_data = fill_pattern;
        hw_desc->fill.dest_addr = dst;

        desc->callback_fn = NULL;
        desc->callback_arg = NULL;

        ioat_submit_single(ioat);

        return desc;
}

static int
ioat_reset_hw(struct spdk_ioat_chan *ioat)
{
        int timeout;
        uint64_t status;
        uint32_t chanerr;
        int rc;

        status = ioat_get_chansts(ioat);
        if (is_ioat_active(status) || is_ioat_idle(status)) {
                ioat_suspend(ioat);
        }

        timeout = 20; /* in milliseconds */
        while (is_ioat_active(status) || is_ioat_idle(status)) {
                spdk_delay_us(1000);
                timeout--;
                if (timeout == 0) {
                        SPDK_ERRLOG("timed out waiting for suspend\n");
                        return -1;
                }
                status = ioat_get_chansts(ioat);
        }

        /*
         * Clear any outstanding errors.
         * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
         */
        chanerr = ioat->regs->chanerr;
        ioat->regs->chanerr = chanerr;

        if (ioat->regs->cbver < SPDK_IOAT_VER_3_3) {
                rc = spdk_pci_device_cfg_read32(ioat->device, &chanerr,
                                                SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
                if (rc) {
                        SPDK_ERRLOG("failed to read the internal channel error register\n");
                        return -1;
                }

                spdk_pci_device_cfg_write32(ioat->device, chanerr,
                                            SPDK_IOAT_PCI_CHANERR_INT_OFFSET);
        }

        ioat_reset(ioat);

        timeout = 20;
        while (ioat_reset_pending(ioat)) {
                spdk_delay_us(1000);
                timeout--;
                if (timeout == 0) {
                        SPDK_ERRLOG("timed out waiting for reset\n");
                        return -1;
                }
        }

        return 0;
}

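/*
 * Completion model: the hardware writes the bus address of the last completed
 * descriptor into the completion-update memory registered via
 * ioat_write_chancmp().  ioat_process_channel_events() advances tail toward
 * head, invoking each descriptor's callback, until it reaches that address,
 * and returns the number of completions processed (0 if nothing new, -1 if
 * the channel reported a halt).
 */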
static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
        struct ioat_descriptor *desc;
        uint64_t status, completed_descriptor, hw_desc_phys_addr, events_count = 0;
        uint32_t tail;

        if (ioat->head == ioat->tail) {
                return 0;
        }

        status = *ioat->comp_update;
        completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

        if (is_ioat_halted(status)) {
                SPDK_ERRLOG("Channel halted (%x)\n", ioat->regs->chanerr);
                return -1;
        }

        if (completed_descriptor == ioat->last_seen) {
                return 0;
        }

        do {
                tail = ioat_get_ring_index(ioat, ioat->tail);
                desc = &ioat->ring[tail];

                if (desc->callback_fn) {
                        desc->callback_fn(desc->callback_arg);
                }

                hw_desc_phys_addr = desc->phys_addr;
                ioat->tail++;
                events_count++;
        } while (hw_desc_phys_addr != completed_descriptor);

        ioat->last_seen = hw_desc_phys_addr;

        return events_count;
}

static void
ioat_channel_destruct(struct spdk_ioat_chan *ioat)
{
        ioat_unmap_pci_bar(ioat);

        if (ioat->ring) {
                free(ioat->ring);
        }

        if (ioat->hw_ring) {
                spdk_free(ioat->hw_ring);
        }

        if (ioat->comp_update) {
                spdk_free((void *)ioat->comp_update);
                ioat->comp_update = NULL;
        }
}

uint32_t
spdk_ioat_get_max_descriptors(struct spdk_ioat_chan *ioat)
{
        return 1 << ioat->ring_size_order;
}

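/*
 * One-time channel bring-up: map BAR 0, check the hardware version and DMA
 * capabilities, validate XFERCAP, allocate the completion-update area and the
 * software/hardware descriptor rings, link the hardware descriptors into a
 * circular chain, reset the channel, program the completion and chain
 * addresses, and finally issue a null descriptor and wait for the channel to
 * go idle to verify that it is operational.
 */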
static int
ioat_channel_start(struct spdk_ioat_chan *ioat)
{
        uint8_t xfercap, version;
        uint64_t status = 0;
        int i, num_descriptors;
        uint64_t comp_update_bus_addr = 0;
        uint64_t phys_addr;

        if (ioat_map_pci_bar(ioat) != 0) {
                SPDK_ERRLOG("ioat_map_pci_bar() failed\n");
                return -1;
        }

        version = ioat->regs->cbver;
        if (version < SPDK_IOAT_VER_3_0) {
                SPDK_ERRLOG(" unsupported IOAT version %u.%u\n",
                            version >> 4, version & 0xF);
                return -1;
        }

        /* Always support DMA copy */
        ioat->dma_capabilities = SPDK_IOAT_ENGINE_COPY_SUPPORTED;
        if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL) {
                ioat->dma_capabilities |= SPDK_IOAT_ENGINE_FILL_SUPPORTED;
        }
        xfercap = ioat->regs->xfercap;

        /* Only bits [4:0] are valid. */
        xfercap &= 0x1f;
        if (xfercap == 0) {
                /* 0 means 4 GB max transfer size. */
                ioat->max_xfer_size = 1ULL << 32;
        } else if (xfercap < 12) {
                /* XFERCAP must be at least 12 (4 KB) according to the spec. */
                SPDK_ERRLOG("invalid XFERCAP value %u\n", xfercap);
                return -1;
        } else {
                ioat->max_xfer_size = 1U << xfercap;
        }

        ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
                                         NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
        if (ioat->comp_update == NULL) {
                return -1;
        }

        comp_update_bus_addr = spdk_vtophys((void *)ioat->comp_update, NULL);
        if (comp_update_bus_addr == SPDK_VTOPHYS_ERROR) {
                return -1;
        }

        ioat->ring_size_order = IOAT_DEFAULT_ORDER;

        num_descriptors = 1 << ioat->ring_size_order;

        ioat->ring = calloc(num_descriptors, sizeof(struct ioat_descriptor));
        if (!ioat->ring) {
                return -1;
        }

        ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
                                     NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
        if (!ioat->hw_ring) {
                return -1;
        }

        for (i = 0; i < num_descriptors; i++) {
                phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
                if (phys_addr == SPDK_VTOPHYS_ERROR) {
                        SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
                        return -1;
                }

                ioat->ring[i].phys_addr = phys_addr;
                ioat->hw_ring[ioat_get_ring_index(ioat, i - 1)].generic.next = phys_addr;
        }

        ioat->head = 0;
        ioat->tail = 0;
        ioat->last_seen = 0;

        ioat_reset_hw(ioat);

        ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
        ioat_write_chancmp(ioat, comp_update_bus_addr);
        ioat_write_chainaddr(ioat, ioat->ring[0].phys_addr);

        ioat_prep_null(ioat);
        spdk_ioat_flush(ioat);

        i = 100;
        while (i-- > 0) {
                spdk_delay_us(100);
                status = ioat_get_chansts(ioat);
                if (is_ioat_idle(status)) {
                        break;
                }
        }

        if (is_ioat_idle(status)) {
                ioat_process_channel_events(ioat);
        } else {
                SPDK_ERRLOG("could not start channel: status = %p, error = %#x\n",
                            (void *)status, ioat->regs->chanerr);
                return -1;
        }

        return 0;
}

/* Caller must hold g_ioat_driver.lock */
static struct spdk_ioat_chan *
ioat_attach(struct spdk_pci_device *device)
{
        struct spdk_ioat_chan *ioat;
        uint32_t cmd_reg;

        ioat = calloc(1, sizeof(struct spdk_ioat_chan));
        if (ioat == NULL) {
                return NULL;
        }

        /* Enable PCI busmaster. */
        spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
        cmd_reg |= 0x4;
        spdk_pci_device_cfg_write32(device, cmd_reg, 4);

        ioat->device = device;

        if (ioat_channel_start(ioat) != 0) {
                ioat_channel_destruct(ioat);
                free(ioat);
                return NULL;
        }

        return ioat;
}

struct ioat_enum_ctx {
        spdk_ioat_probe_cb probe_cb;
        spdk_ioat_attach_cb attach_cb;
        void *cb_ctx;
};

/* This function must only be called while holding g_ioat_driver.lock */
static int
ioat_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
        struct ioat_enum_ctx *enum_ctx = ctx;
        struct spdk_ioat_chan *ioat;

        /* Verify that this device is not already attached */
        TAILQ_FOREACH(ioat, &g_ioat_driver.attached_chans, tailq) {
                /*
                 * NOTE: This assumes that the PCI abstraction layer will use the same device handle
                 *  across enumerations; we could compare by BDF instead if this is not true.
                 */
                if (pci_dev == ioat->device) {
                        return 0;
                }
        }

        if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
                /*
                 * Since I/OAT init is relatively quick, just perform the full init during probing.
                 *  If this turns out to be a bottleneck later, this can be changed to work like
                 *  NVMe with a list of devices to initialize in parallel.
                 */
                ioat = ioat_attach(pci_dev);
                if (ioat == NULL) {
                        SPDK_ERRLOG("ioat_attach() failed\n");
                        return -1;
                }

                TAILQ_INSERT_TAIL(&g_ioat_driver.attached_chans, ioat, tailq);

                enum_ctx->attach_cb(enum_ctx->cb_ctx, pci_dev, ioat);
        }

        return 0;
}

int
spdk_ioat_probe(void *cb_ctx, spdk_ioat_probe_cb probe_cb, spdk_ioat_attach_cb attach_cb)
{
        int rc;
        struct ioat_enum_ctx enum_ctx;

        pthread_mutex_lock(&g_ioat_driver.lock);

        enum_ctx.probe_cb = probe_cb;
        enum_ctx.attach_cb = attach_cb;
        enum_ctx.cb_ctx = cb_ctx;

        rc = spdk_pci_enumerate(spdk_pci_ioat_get_driver(), ioat_enum_cb, &enum_ctx);

        pthread_mutex_unlock(&g_ioat_driver.lock);

        return rc;
}
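
/*
 * Example usage (a sketch, assuming the spdk_ioat_probe_cb/spdk_ioat_attach_cb
 * typedefs from spdk/ioat.h; probe_cb, attach_cb and g_chan are illustrative):
 *
 *     static struct spdk_ioat_chan *g_chan;
 *
 *     static bool
 *     probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
 *     {
 *             return true;    // claim every I/OAT channel that is found
 *     }
 *
 *     static void
 *     attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *chan)
 *     {
 *             g_chan = chan;  // remember the channel for later submissions
 *     }
 *
 *     if (spdk_ioat_probe(NULL, probe_cb, attach_cb) != 0) {
 *             // handle enumeration failure
 *     }
 */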

void
spdk_ioat_detach(struct spdk_ioat_chan *ioat)
{
        struct ioat_driver      *driver = &g_ioat_driver;

        /* ioat should be in the free list (not registered to a thread)
         * when calling spdk_ioat_detach().
         */
        pthread_mutex_lock(&driver->lock);
        TAILQ_REMOVE(&driver->attached_chans, ioat, tailq);
        pthread_mutex_unlock(&driver->lock);

        ioat_channel_destruct(ioat);
        free(ioat);
}

int
spdk_ioat_build_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
                     void *dst, const void *src, uint64_t nbytes)
{
        struct ioat_descriptor  *last_desc = NULL;
        uint64_t        remaining, op_size;
        uint64_t        vdst, vsrc;
        uint64_t        pdst_addr, psrc_addr, dst_len, src_len;
        uint32_t        orig_head;

        if (!ioat) {
                return -EINVAL;
        }

        orig_head = ioat->head;

        vdst = (uint64_t)dst;
        vsrc = (uint64_t)src;

        remaining = nbytes;
        while (remaining) {
                src_len = dst_len = remaining;

                psrc_addr = spdk_vtophys((void *)vsrc, &src_len);
                if (psrc_addr == SPDK_VTOPHYS_ERROR) {
                        return -EINVAL;
                }
                pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
                if (pdst_addr == SPDK_VTOPHYS_ERROR) {
                        return -EINVAL;
                }

                op_size = spdk_min(dst_len, src_len);
                op_size = spdk_min(op_size, ioat->max_xfer_size);
                remaining -= op_size;

                last_desc = ioat_prep_copy(ioat, pdst_addr, psrc_addr, op_size);

                if (remaining == 0 || last_desc == NULL) {
                        break;
                }

                vsrc += op_size;
                vdst += op_size;
        }

        /* Issue null descriptor for null transfer */
        if (nbytes == 0) {
                last_desc = ioat_prep_null(ioat);
        }

        if (last_desc) {
                last_desc->callback_fn = cb_fn;
                last_desc->callback_arg = cb_arg;
        } else {
                /*
                 * Ran out of descriptors in the ring - reset head to leave things as they were
                 * in case we managed to fill out any descriptors.
                 */
                ioat->head = orig_head;
                return -ENOMEM;
        }

        return 0;
}

int
spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
                      void *dst, const void *src, uint64_t nbytes)
{
        int rc;

        rc = spdk_ioat_build_copy(ioat, cb_arg, cb_fn, dst, src, nbytes);
        if (rc != 0) {
                return rc;
        }

        spdk_ioat_flush(ioat);
        return 0;
}
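
/*
 * Example (a sketch; copy_done and the done flag are illustrative, and dst/src
 * must be DMA-able memory that spdk_vtophys() can translate, e.g. allocated
 * with spdk_zmalloc()):
 *
 *     static void copy_done(void *arg) { *(bool *)arg = true; }
 *
 *     bool done = false;
 *
 *     if (spdk_ioat_submit_copy(chan, &done, copy_done, dst, src, len) == 0) {
 *             while (!done) {
 *                     spdk_ioat_process_events(chan);
 *             }
 *     }
 */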

int
spdk_ioat_build_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
                     void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
        struct ioat_descriptor  *last_desc = NULL;
        uint64_t        remaining, op_size;
        uint64_t        vdst;
        uint64_t        pdst_addr, dst_len;
        uint32_t        orig_head;

        if (!ioat) {
                return -EINVAL;
        }

        if (!(ioat->dma_capabilities & SPDK_IOAT_ENGINE_FILL_SUPPORTED)) {
                SPDK_ERRLOG("Channel does not support memory fill\n");
                return -1;
        }

        orig_head = ioat->head;

        vdst = (uint64_t)dst;
        remaining = nbytes;

        while (remaining) {
                dst_len = remaining;
                pdst_addr = spdk_vtophys((void *)vdst, &dst_len);
                if (pdst_addr == SPDK_VTOPHYS_ERROR) {
                        return -EINVAL;
                }

                op_size = spdk_min(dst_len, ioat->max_xfer_size);
                remaining -= op_size;

                last_desc = ioat_prep_fill(ioat, pdst_addr, fill_pattern, op_size);

                if (remaining == 0 || last_desc == NULL) {
                        break;
                }

                vdst += op_size;
        }

        if (last_desc) {
                last_desc->callback_fn = cb_fn;
                last_desc->callback_arg = cb_arg;
        } else {
                /*
                 * Ran out of descriptors in the ring - reset head to leave things as they were
                 * in case we managed to fill out any descriptors.
                 */
                ioat->head = orig_head;
                return -ENOMEM;
        }

        return 0;
}

int
spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_cb cb_fn,
                      void *dst, uint64_t fill_pattern, uint64_t nbytes)
{
        int rc;

        rc = spdk_ioat_build_fill(ioat, cb_arg, cb_fn, dst, fill_pattern, nbytes);
        if (rc != 0) {
                return rc;
        }

        spdk_ioat_flush(ioat);
        return 0;
}

uint32_t
spdk_ioat_get_dma_capabilities(struct spdk_ioat_chan *ioat)
{
        if (!ioat) {
                return 0;
        }
        return ioat->dma_capabilities;
}
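
/*
 * For example, a caller can check for fill support before using
 * spdk_ioat_submit_fill() (a sketch; chan, fill_done, done, dst, pattern and
 * len are illustrative):
 *
 *     if (spdk_ioat_get_dma_capabilities(chan) & SPDK_IOAT_ENGINE_FILL_SUPPORTED) {
 *             spdk_ioat_submit_fill(chan, &done, fill_done, dst, pattern, len);
 *     }
 */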

int
spdk_ioat_process_events(struct spdk_ioat_chan *ioat)
{
        return ioat_process_channel_events(ioat);
}

SPDK_LOG_REGISTER_COMPONENT(ioat)
