LCOV - code coverage report
Current view: top level - lib/ftl - ftl_core.c (source / functions)
Test: ut_cov_unit.info
Date: 2024-07-11 16:21:08
               Hit   Total   Coverage
Lines:          10     416      2.4 %
Functions:       2      39      5.1 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2018 Intel Corporation.
       3             :  *   All rights reserved.
       4             :  */
       5             : 
       6             : #include "spdk/likely.h"
       7             : #include "spdk/stdinc.h"
       8             : #include "spdk/nvme.h"
       9             : #include "spdk/thread.h"
      10             : #include "spdk/bdev_module.h"
      11             : #include "spdk/string.h"
      12             : #include "spdk/ftl.h"
      13             : #include "spdk/crc32.h"
      14             : 
      15             : #include "ftl_core.h"
      16             : #include "ftl_band.h"
      17             : #include "ftl_io.h"
      18             : #include "ftl_debug.h"
      19             : #include "ftl_internal.h"
      20             : #include "mngt/ftl_mngt.h"
      21             : 
      22             : 
      23             : size_t
      24           0 : spdk_ftl_io_size(void)
      25             : {
      26           0 :         return sizeof(struct ftl_io);
      27             : }
      28             : 
      29             : static void
      30           0 : ftl_io_cmpl_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
      31             : {
      32           0 :         struct ftl_io *io = cb_arg;
      33           0 :         struct spdk_ftl_dev *dev = io->dev;
      34             : 
      35           0 :         ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
      36             : 
      37           0 :         if (spdk_unlikely(!success)) {
      38           0 :                 io->status = -EIO;
      39             :         }
      40             : 
      41           0 :         ftl_trace_completion(dev, io, FTL_TRACE_COMPLETION_DISK);
      42             : 
      43           0 :         ftl_io_dec_req(io);
      44           0 :         if (ftl_io_done(io)) {
      45           0 :                 ftl_io_complete(io);
      46             :         }
      47             : 
      48           0 :         spdk_bdev_free_io(bdev_io);
      49           0 : }
      50             : 
      51             : static void
      52           0 : ftl_band_erase(struct ftl_band *band)
      53             : {
      54           0 :         assert(band->md->state == FTL_BAND_STATE_CLOSED ||
      55             :                band->md->state == FTL_BAND_STATE_FREE);
      56             : 
      57           0 :         ftl_band_set_state(band, FTL_BAND_STATE_PREP);
      58           0 : }
      59             : 
      60             : static size_t
      61           0 : ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
      62             : {
      63           0 :         assert(type < SPDK_FTL_LIMIT_MAX);
      64           0 :         return dev->conf.limits[type];
      65             : }
      66             : 
      67             : static bool
      68           0 : ftl_shutdown_complete(struct spdk_ftl_dev *dev)
      69             : {
      70             :         uint64_t i;
      71             : 
      72           0 :         if (dev->num_inflight) {
      73           0 :                 return false;
      74             :         }
      75             : 
      76           0 :         if (!ftl_nv_cache_is_halted(&dev->nv_cache)) {
      77           0 :                 ftl_nv_cache_halt(&dev->nv_cache);
      78           0 :                 return false;
      79             :         }
      80             : 
      81           0 :         if (!ftl_writer_is_halted(&dev->writer_user)) {
      82           0 :                 ftl_writer_halt(&dev->writer_user);
      83           0 :                 return false;
      84             :         }
      85             : 
      86           0 :         if (!ftl_reloc_is_halted(dev->reloc)) {
      87           0 :                 ftl_reloc_halt(dev->reloc);
      88           0 :                 return false;
      89             :         }
      90             : 
      91           0 :         if (!ftl_writer_is_halted(&dev->writer_gc)) {
      92           0 :                 ftl_writer_halt(&dev->writer_gc);
      93           0 :                 return false;
      94             :         }
      95             : 
      96           0 :         if (!ftl_nv_cache_chunks_busy(&dev->nv_cache)) {
      97           0 :                 return false;
      98             :         }
      99             : 
     100           0 :         for (i = 0; i < ftl_get_num_bands(dev); ++i) {
     101           0 :                 if (dev->bands[i].queue_depth ||
     102           0 :                     dev->bands[i].md->state == FTL_BAND_STATE_CLOSING) {
     103           0 :                         return false;
     104             :                 }
     105             :         }
     106             : 
     107           0 :         if (!ftl_l2p_is_halted(dev)) {
     108           0 :                 ftl_l2p_halt(dev);
     109           0 :                 return false;
     110             :         }
     111             : 
     112           0 :         return true;
     113             : }
     114             : 
     115             : void
     116           0 : ftl_apply_limits(struct spdk_ftl_dev *dev)
     117             : {
     118             :         size_t limit;
     119           0 :         struct ftl_stats *stats = &dev->stats;
     120             :         int i;
     121             : 
     122             :         /*  Clear existing limit */
     123           0 :         dev->limit = SPDK_FTL_LIMIT_MAX;
     124             : 
     125           0 :         for (i = SPDK_FTL_LIMIT_CRIT; i < SPDK_FTL_LIMIT_MAX; ++i) {
     126           0 :                 limit = ftl_get_limit(dev, i);
     127             : 
     128           0 :                 if (dev->num_free <= limit) {
     129           0 :                         stats->limits[i]++;
     130           0 :                         dev->limit = i;
     131           0 :                         break;
     132             :                 }
     133             :         }
     134             : 
     135           0 :         ftl_trace_limits(dev, dev->limit, dev->num_free);
     136           0 : }
     137             : 
     138             : void
     139           2 : ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
     140             : {
     141             :         struct ftl_band *band;
     142             :         struct ftl_p2l_map *p2l_map;
     143             : 
     144           2 :         if (ftl_addr_in_nvc(dev, addr)) {
     145           0 :                 ftl_bitmap_clear(dev->valid_map, addr);
     146           0 :                 return;
     147             :         }
     148             : 
     149           2 :         band = ftl_band_from_addr(dev, addr);
     150           2 :         p2l_map = &band->p2l_map;
     151             : 
      152             :         /* The bit might already be cleared if two writes are scheduled to the */
      153             :         /* same LBA at the same time */
     154           2 :         if (ftl_bitmap_get(dev->valid_map, addr)) {
     155           2 :                 assert(p2l_map->num_valid > 0);
     156           2 :                 ftl_bitmap_clear(dev->valid_map, addr);
     157           2 :                 p2l_map->num_valid--;
     158             :         }
     159             : 
      160             :         /* Invalidate an open/full band's p2l_map entry to keep the p2l and l2p maps
      161             :          * consistent when the band transitions to the closed state */
     162           2 :         if (FTL_BAND_STATE_OPEN == band->md->state || FTL_BAND_STATE_FULL == band->md->state) {
     163           0 :                 p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)].lba = FTL_LBA_INVALID;
     164           0 :                 p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)].seq_id = 0;
     165             :         }
     166             : }
     167             : 
     168             : static int
     169           0 : ftl_read_canceled(int rc)
     170             : {
     171           0 :         return rc == -EFAULT;
     172             : }
     173             : 
     174             : static int
     175           0 : ftl_get_next_read_addr(struct ftl_io *io, ftl_addr *addr)
     176             : {
     177           0 :         struct spdk_ftl_dev *dev = io->dev;
     178             :         ftl_addr next_addr;
     179             :         size_t i;
     180           0 :         bool addr_cached = false;
     181             : 
     182           0 :         *addr = ftl_l2p_get(dev, ftl_io_current_lba(io));
     183           0 :         io->map[io->pos] = *addr;
     184             : 
     185             :         /* If the address is invalid, skip it */
     186           0 :         if (*addr == FTL_ADDR_INVALID) {
     187           0 :                 return -EFAULT;
     188             :         }
     189             : 
     190           0 :         addr_cached = ftl_addr_in_nvc(dev, *addr);
     191             : 
     192           0 :         for (i = 1; i < ftl_io_iovec_len_left(io); ++i) {
     193           0 :                 next_addr = ftl_l2p_get(dev, ftl_io_get_lba(io, io->pos + i));
     194             : 
     195           0 :                 if (next_addr == FTL_ADDR_INVALID) {
     196           0 :                         break;
     197             :                 }
     198             : 
      199             :                 /* It's not enough to check for contiguity: if user data spans the last block
      200             :                  * of the base device and the first block of the nv cache, the addresses are
      201             :                  * 'contiguous', but they can't be handled with a single read request.
      202             :                  */
     203           0 :                 if (addr_cached != ftl_addr_in_nvc(dev, next_addr)) {
     204           0 :                         break;
     205             :                 }
     206             : 
     207           0 :                 if (*addr + i != next_addr) {
     208           0 :                         break;
     209             :                 }
     210             : 
     211           0 :                 io->map[io->pos + i] = next_addr;
     212             :         }
     213             : 
     214           0 :         return i;
     215             : }
     216             : 
     217             : static void ftl_submit_read(struct ftl_io *io);
     218             : 
     219             : static void
     220           0 : _ftl_submit_read(void *_io)
     221             : {
     222           0 :         struct ftl_io *io = _io;
     223             : 
     224           0 :         ftl_submit_read(io);
     225           0 : }
     226             : 
     227             : static void
     228           0 : ftl_submit_read(struct ftl_io *io)
     229             : {
     230           0 :         struct spdk_ftl_dev *dev = io->dev;
     231           0 :         ftl_addr addr;
     232           0 :         int rc = 0, num_blocks;
     233             : 
     234           0 :         while (io->pos < io->num_blocks) {
     235           0 :                 num_blocks = ftl_get_next_read_addr(io, &addr);
     236           0 :                 rc = num_blocks;
     237             : 
      238             :                 /* The user LBA doesn't hold valid data (trimmed or never written to); fill it with zeros and skip this block */
     239           0 :                 if (ftl_read_canceled(rc)) {
     240           0 :                         memset(ftl_io_iovec_addr(io), 0, FTL_BLOCK_SIZE);
     241           0 :                         ftl_io_advance(io, 1);
     242           0 :                         continue;
     243             :                 }
     244             : 
     245           0 :                 assert(num_blocks > 0);
     246             : 
     247           0 :                 ftl_trace_submission(dev, io, addr, num_blocks);
     248             : 
     249           0 :                 if (ftl_addr_in_nvc(dev, addr)) {
     250           0 :                         rc = ftl_nv_cache_read(io, addr, num_blocks, ftl_io_cmpl_cb, io);
     251             :                 } else {
     252           0 :                         rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
     253             :                                                    ftl_io_iovec_addr(io),
     254             :                                                    addr, num_blocks, ftl_io_cmpl_cb, io);
     255             :                 }
     256             : 
     257           0 :                 if (spdk_unlikely(rc)) {
     258           0 :                         if (rc == -ENOMEM) {
     259             :                                 struct spdk_bdev *bdev;
     260             :                                 struct spdk_io_channel *ch;
     261             : 
     262           0 :                                 if (ftl_addr_in_nvc(dev, addr)) {
     263           0 :                                         bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
     264           0 :                                         ch = dev->nv_cache.cache_ioch;
     265             :                                 } else {
     266           0 :                                         bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
     267           0 :                                         ch = dev->base_ioch;
     268             :                                 }
     269           0 :                                 io->bdev_io_wait.bdev = bdev;
     270           0 :                                 io->bdev_io_wait.cb_fn = _ftl_submit_read;
     271           0 :                                 io->bdev_io_wait.cb_arg = io;
     272           0 :                                 spdk_bdev_queue_io_wait(bdev, ch, &io->bdev_io_wait);
     273           0 :                                 return;
     274             :                         } else {
     275           0 :                                 ftl_abort();
     276             :                         }
     277             :                 }
     278             : 
     279           0 :                 ftl_io_inc_req(io);
     280           0 :                 ftl_io_advance(io, num_blocks);
     281             :         }
     282             : 
     283             :         /* If we didn't have to read anything from the device, */
     284             :         /* complete the request right away */
     285           0 :         if (ftl_io_done(io)) {
     286           0 :                 ftl_io_complete(io);
     287             :         }
     288             : }
     289             : 
     290             : bool
     291           0 : ftl_needs_reloc(struct spdk_ftl_dev *dev)
     292             : {
     293           0 :         size_t limit = ftl_get_limit(dev, SPDK_FTL_LIMIT_START);
     294             : 
     295           0 :         if (dev->num_free <= limit) {
     296           0 :                 return true;
     297             :         }
     298             : 
     299           0 :         return false;
     300             : }
     301             : 
     302             : void
     303           0 : spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attrs,
     304             :                        size_t attrs_size)
     305             : {
     306           0 :         attrs->num_blocks = dev->num_lbas;
     307           0 :         attrs->block_size = FTL_BLOCK_SIZE;
     308           0 :         attrs->optimum_io_size = dev->xfer_size;
     309             :         /* NOTE: check any new fields in attrs against attrs_size */
     310           0 : }
     311             : 
     312             : static void
     313           0 : ftl_io_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
     314             : {
     315           0 :         struct ftl_io *io = pin_ctx->cb_ctx;
     316             : 
     317           0 :         if (spdk_unlikely(status != 0)) {
     318             :                 /* Retry on the internal L2P fault */
     319           0 :                 io->status = -EAGAIN;
     320           0 :                 ftl_io_complete(io);
     321           0 :                 return;
     322             :         }
     323             : 
     324           0 :         io->flags |= FTL_IO_PINNED;
     325           0 :         ftl_submit_read(io);
     326             : }
     327             : 
     328             : static void
     329           0 : ftl_io_pin(struct ftl_io *io)
     330             : {
     331           0 :         if (spdk_unlikely(io->flags & FTL_IO_PINNED)) {
     332             :                 /*
     333             :                  * The IO is in a retry path and it had been pinned already.
     334             :                  * Continue with further processing.
     335             :                  */
     336           0 :                 ftl_l2p_pin_skip(io->dev, ftl_io_pin_cb, io, &io->l2p_pin_ctx);
     337             :         } else {
     338             :                 /* First time when pinning the IO */
     339           0 :                 ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
     340             :                             ftl_io_pin_cb, io, &io->l2p_pin_ctx);
     341             :         }
     342           0 : }
     343             : 
     344             : static void
     345           0 : start_io(struct ftl_io *io)
     346             : {
     347           0 :         struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
     348           0 :         struct spdk_ftl_dev *dev = io->dev;
     349             : 
     350           0 :         io->map = ftl_mempool_get(ioch->map_pool);
     351           0 :         if (spdk_unlikely(!io->map)) {
     352           0 :                 io->status = -ENOMEM;
     353           0 :                 ftl_io_complete(io);
     354           0 :                 return;
     355             :         }
     356             : 
     357           0 :         switch (io->type) {
     358           0 :         case FTL_IO_READ:
     359           0 :                 TAILQ_INSERT_TAIL(&dev->rd_sq, io, queue_entry);
     360           0 :                 break;
     361           0 :         case FTL_IO_WRITE:
     362           0 :                 TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
     363           0 :                 break;
     364           0 :         case FTL_IO_UNMAP:
     365           0 :                 TAILQ_INSERT_TAIL(&dev->unmap_sq, io, queue_entry);
     366           0 :                 break;
     367           0 :         default:
     368           0 :                 io->status = -EOPNOTSUPP;
     369           0 :                 ftl_io_complete(io);
     370             :         }
     371             : }
     372             : 
     373             : static int
     374           0 : queue_io(struct spdk_ftl_dev *dev, struct ftl_io *io)
     375             : {
     376             :         size_t result;
     377           0 :         struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
     378             : 
     379           0 :         result = spdk_ring_enqueue(ioch->sq, (void **)&io, 1, NULL);
     380           0 :         if (spdk_unlikely(0 == result)) {
     381           0 :                 return -EAGAIN;
     382             :         }
     383             : 
     384           0 :         return 0;
     385             : }
     386             : 
     387             : int
     388           0 : spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
     389             :                 uint64_t lba, uint64_t lba_cnt, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
     390             :                 void *cb_arg)
     391             : {
     392             :         int rc;
     393             : 
     394           0 :         if (iov_cnt == 0) {
     395           0 :                 return -EINVAL;
     396             :         }
     397             : 
     398           0 :         if (lba_cnt == 0) {
     399           0 :                 return -EINVAL;
     400             :         }
     401             : 
     402           0 :         if (lba_cnt != ftl_iovec_num_blocks(iov, iov_cnt)) {
     403           0 :                 FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
     404             :                            dev->conf.name, lba);
     405           0 :                 return -EINVAL;
     406             :         }
     407             : 
     408           0 :         if (!dev->initialized) {
     409           0 :                 return -EBUSY;
     410             :         }
     411             : 
     412           0 :         rc = ftl_io_init(ch, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_WRITE);
     413           0 :         if (rc) {
     414           0 :                 return rc;
     415             :         }
     416             : 
     417           0 :         return queue_io(dev, io);
     418             : }
     419             : 
     420             : int
     421           0 : spdk_ftl_readv(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
     422             :                uint64_t lba, uint64_t lba_cnt, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
     423             : {
     424             :         int rc;
     425             : 
     426           0 :         if (iov_cnt == 0) {
     427           0 :                 return -EINVAL;
     428             :         }
     429             : 
     430           0 :         if (lba_cnt == 0) {
     431           0 :                 return -EINVAL;
     432             :         }
     433             : 
     434           0 :         if (lba_cnt != ftl_iovec_num_blocks(iov, iov_cnt)) {
     435           0 :                 FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
     436             :                            dev->conf.name, lba);
     437           0 :                 return -EINVAL;
     438             :         }
     439             : 
     440           0 :         if (!dev->initialized) {
     441           0 :                 return -EBUSY;
     442             :         }
     443             : 
     444           0 :         rc = ftl_io_init(ch, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_READ);
     445           0 :         if (rc) {
     446           0 :                 return rc;
     447             :         }
     448             : 
     449           0 :         return queue_io(dev, io);
     450             : }
     451             : 
     452             : int
     453           0 : ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
     454             :           uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
     455             : {
     456             :         int rc;
     457             : 
     458           0 :         rc = ftl_io_init(ch, io, lba, lba_cnt, NULL, 0, cb_fn, cb_arg, FTL_IO_UNMAP);
     459           0 :         if (rc) {
     460           0 :                 return rc;
     461             :         }
     462             : 
     463           0 :         return queue_io(dev, io);
     464             : }
     465             : 
     466             : int
     467           0 : spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
     468             :                uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
     469             : {
     470             :         int rc;
     471           0 :         uint64_t alignment = dev->layout.l2p.lbas_in_page;
     472             : 
     473           0 :         if (lba_cnt == 0) {
     474           0 :                 return -EINVAL;
     475             :         }
     476             : 
     477           0 :         if (lba + lba_cnt < lba_cnt) {
     478           0 :                 return -EINVAL;
     479             :         }
     480             : 
     481           0 :         if (lba + lba_cnt > dev->num_lbas) {
     482           0 :                 return -EINVAL;
     483             :         }
     484             : 
     485           0 :         if (!dev->initialized) {
     486           0 :                 return -EBUSY;
     487             :         }
     488             : 
     489           0 :         if (lba % alignment || lba_cnt % alignment) {
     490           0 :                 if (!io) {
      491             :                         /* This is the management/RPC path; its parameters must be aligned to 1MiB. */
     492           0 :                         return -EINVAL;
     493             :                 }
     494             : 
     495             :                 /* Otherwise unaligned IO requests are NOPs */
     496           0 :                 rc = ftl_io_init(ch, io, lba, lba_cnt, NULL, 0, cb_fn, cb_arg, FTL_IO_UNMAP);
     497           0 :                 if (rc) {
     498           0 :                         return rc;
     499             :                 }
     500             : 
     501           0 :                 io->status = 0;
     502           0 :                 ftl_io_complete(io);
     503           0 :                 return 0;
     504             :         }
     505             : 
     506           0 :         if (io) {
     507           0 :                 rc = ftl_unmap(dev, io, ch, lba, lba_cnt, cb_fn, cb_arg);
     508             :         } else {
     509           0 :                 rc = ftl_mngt_unmap(dev, lba, lba_cnt, cb_fn, cb_arg);
     510             :         }
     511             : 
     512           0 :         return rc;
     513             : }
     514             : 
     515             : #define FTL_IO_QUEUE_BATCH 16
     516             : int
     517           0 : ftl_io_channel_poll(void *arg)
     518             : {
     519           0 :         struct ftl_io_channel *ch = arg;
     520           0 :         void *ios[FTL_IO_QUEUE_BATCH];
     521             :         uint64_t i, count;
     522             : 
     523           0 :         count = spdk_ring_dequeue(ch->cq, ios, FTL_IO_QUEUE_BATCH);
     524           0 :         if (count == 0) {
     525           0 :                 return SPDK_POLLER_IDLE;
     526             :         }
     527             : 
     528           0 :         for (i = 0; i < count; i++) {
     529           0 :                 struct ftl_io *io = ios[i];
     530           0 :                 io->user_fn(io->cb_ctx, io->status);
     531             :         }
     532             : 
     533           0 :         return SPDK_POLLER_BUSY;
     534             : }
     535             : 
     536             : static void
     537           0 : ftl_process_io_channel(struct spdk_ftl_dev *dev, struct ftl_io_channel *ioch)
     538             : {
     539           0 :         void *ios[FTL_IO_QUEUE_BATCH];
     540             :         size_t count, i;
     541             : 
     542           0 :         count = spdk_ring_dequeue(ioch->sq, ios, FTL_IO_QUEUE_BATCH);
     543           0 :         if (count == 0) {
     544           0 :                 return;
     545             :         }
     546             : 
     547           0 :         for (i = 0; i < count; i++) {
     548           0 :                 struct ftl_io *io = ios[i];
     549           0 :                 start_io(io);
     550             :         }
     551             : }
     552             : 
     553             : static void
     554           0 : ftl_process_unmap_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
     555             : {
     556           0 :         struct ftl_io *io = md->owner.cb_ctx;
     557             : 
     558           0 :         io->dev->unmap_qd--;
     559             : 
     560           0 :         if (spdk_unlikely(status)) {
     561             : #ifdef SPDK_FTL_RETRY_ON_ERROR
     562             :                 TAILQ_INSERT_HEAD(&io->dev->unmap_sq, io, queue_entry);
     563             :                 return;
     564             : #else
     565           0 :                 io->status = status;
     566             : #endif
     567             :         }
     568             : 
     569           0 :         ftl_io_complete(io);
     570           0 : }
     571             : 
     572             : void
     573           0 : ftl_set_unmap_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks, uint64_t seq_id)
     574             : {
     575             :         uint64_t first_page, num_pages;
     576             :         uint64_t first_md_block, num_md_blocks, num_pages_in_block;
     577           0 :         uint32_t lbas_in_page = dev->layout.l2p.lbas_in_page;
     578           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
     579           0 :         uint64_t *page = ftl_md_get_buffer(md);
     580             :         union ftl_md_vss *page_vss;
     581             :         size_t i;
     582             : 
     583           0 :         first_page = lba / lbas_in_page;
     584           0 :         num_pages = num_blocks / lbas_in_page;
     585             : 
     586           0 :         for (i = first_page; i < first_page + num_pages; ++i) {
     587           0 :                 ftl_bitmap_set(dev->unmap_map, i);
     588           0 :                 page[i] = seq_id;
     589             :         }
     590             : 
     591           0 :         num_pages_in_block = FTL_BLOCK_SIZE / sizeof(*page);
     592           0 :         first_md_block = first_page / num_pages_in_block;
     593           0 :         num_md_blocks = spdk_divide_round_up(num_pages, num_pages_in_block);
     594           0 :         page_vss = ftl_md_get_vss_buffer(md) + first_md_block;
     595           0 :         for (i = first_md_block; i < num_md_blocks; ++i, page_vss++) {
     596           0 :                 page_vss->unmap.start_lba = lba;
     597           0 :                 page_vss->unmap.num_blocks = num_blocks;
     598           0 :                 page_vss->unmap.seq_id = seq_id;
     599             :         }
     600           0 : }
     601             : 
     602             : static bool
     603           0 : ftl_process_unmap(struct ftl_io *io)
     604             : {
     605           0 :         struct spdk_ftl_dev *dev = io->dev;
     606           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
     607             :         uint64_t seq_id;
     608             : 
     609           0 :         seq_id = ftl_nv_cache_acquire_trim_seq_id(&dev->nv_cache);
     610           0 :         if (seq_id == 0) {
     611           0 :                 return false;
     612             :         }
     613             : 
     614           0 :         dev->unmap_in_progress = true;
     615           0 :         dev->unmap_qd++;
     616             : 
     617           0 :         dev->sb_shm->trim.start_lba = io->lba;
     618           0 :         dev->sb_shm->trim.num_blocks = io->num_blocks;
     619           0 :         dev->sb_shm->trim.seq_id = seq_id;
     620           0 :         dev->sb_shm->trim.in_progress = true;
     621           0 :         ftl_set_unmap_map(dev, io->lba, io->num_blocks, seq_id);
     622           0 :         ftl_debug_inject_unmap_error();
     623           0 :         dev->sb_shm->trim.in_progress = false;
     624             : 
     625           0 :         md->owner.cb_ctx = io;
     626           0 :         md->cb = ftl_process_unmap_cb;
     627             : 
     628           0 :         ftl_md_persist(md);
     629             : 
     630           0 :         return true;
     631             : }
     632             : 
     633             : static void
     634           0 : ftl_process_io_queue(struct spdk_ftl_dev *dev)
     635             : {
     636             :         struct ftl_io_channel *ioch;
     637             :         struct ftl_io *io;
     638             : 
      639             :         /* TODO: Try to figure out a mechanism to batch more requests at the same time,
      640             :          * while keeping enough resources (pinned pages) balanced between reads, writes and gc/compaction
      641             :          */
     642           0 :         if (!TAILQ_EMPTY(&dev->rd_sq)) {
     643           0 :                 io = TAILQ_FIRST(&dev->rd_sq);
     644           0 :                 TAILQ_REMOVE(&dev->rd_sq, io, queue_entry);
     645           0 :                 assert(io->type == FTL_IO_READ);
     646           0 :                 ftl_io_pin(io);
     647           0 :                 ftl_add_io_activity(dev);
     648             :         }
     649             : 
     650           0 :         while (!TAILQ_EMPTY(&dev->wr_sq) && !ftl_nv_cache_throttle(dev)) {
     651           0 :                 io = TAILQ_FIRST(&dev->wr_sq);
     652           0 :                 TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
     653           0 :                 assert(io->type == FTL_IO_WRITE);
     654           0 :                 if (!ftl_nv_cache_write(io)) {
     655           0 :                         TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
     656           0 :                         break;
     657             :                 }
     658           0 :                 ftl_add_io_activity(dev);
     659             :         }
     660             : 
     661           0 :         if (!TAILQ_EMPTY(&dev->unmap_sq) && dev->unmap_qd == 0) {
     662           0 :                 io = TAILQ_FIRST(&dev->unmap_sq);
     663           0 :                 TAILQ_REMOVE(&dev->unmap_sq, io, queue_entry);
     664           0 :                 assert(io->type == FTL_IO_UNMAP);
     665             : 
      666             :                 /*
      667             :                  * An unmap operation requires generating a sequence id for itself, which it gets based on the open chunk
      668             :                  * in the nv cache. If there are no open chunks (because we're in the middle of a state transition, or
      669             :                  * compaction has lagged behind), then we need to wait for the nv cache to resolve the situation - it's
      670             :                  * fine to put the unmap back on the queue and try again later.
      671             :                  */
     672           0 :                 if (!ftl_process_unmap(io)) {
     673           0 :                         TAILQ_INSERT_HEAD(&dev->unmap_sq, io, queue_entry);
     674             :                 } else {
     675           0 :                         ftl_add_io_activity(dev);
     676             :                 }
     677             :         }
     678             : 
     679           0 :         TAILQ_FOREACH(ioch, &dev->ioch_queue, entry) {
     680           0 :                 ftl_process_io_channel(dev, ioch);
     681             :         }
     682           0 : }
     683             : 
     684             : int
     685           0 : ftl_core_poller(void *ctx)
     686             : {
     687           0 :         struct spdk_ftl_dev *dev = ctx;
     688           0 :         uint64_t io_activity_total_old = dev->stats.io_activity_total;
     689             : 
     690           0 :         if (dev->halt && ftl_shutdown_complete(dev)) {
     691           0 :                 spdk_poller_unregister(&dev->core_poller);
     692           0 :                 return SPDK_POLLER_IDLE;
     693             :         }
     694             : 
     695           0 :         ftl_process_io_queue(dev);
     696           0 :         ftl_writer_run(&dev->writer_user);
     697           0 :         ftl_writer_run(&dev->writer_gc);
     698           0 :         ftl_reloc(dev->reloc);
     699           0 :         ftl_nv_cache_process(dev);
     700           0 :         ftl_l2p_process(dev);
     701             : 
     702           0 :         if (io_activity_total_old != dev->stats.io_activity_total) {
     703           0 :                 return SPDK_POLLER_BUSY;
     704             :         }
     705             : 
     706           0 :         return SPDK_POLLER_IDLE;
     707             : }
     708             : 
     709             : struct ftl_band *
     710           0 : ftl_band_get_next_free(struct spdk_ftl_dev *dev)
     711             : {
     712           0 :         struct ftl_band *band = NULL;
     713             : 
     714           0 :         if (!TAILQ_EMPTY(&dev->free_bands)) {
     715           0 :                 band = TAILQ_FIRST(&dev->free_bands);
     716           0 :                 TAILQ_REMOVE(&dev->free_bands, band, queue_entry);
     717           0 :                 ftl_band_erase(band);
     718             :         }
     719             : 
     720           0 :         return band;
     721             : }
     722             : 
     723             : void *g_ftl_write_buf;
     724             : void *g_ftl_read_buf;
     725             : 
     726             : int
     727           0 : spdk_ftl_init(void)
     728             : {
     729           0 :         g_ftl_write_buf = spdk_zmalloc(FTL_ZERO_BUFFER_SIZE, FTL_ZERO_BUFFER_SIZE, NULL,
     730             :                                        SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
     731           0 :         if (!g_ftl_write_buf) {
     732           0 :                 return -ENOMEM;
     733             :         }
     734             : 
     735           0 :         g_ftl_read_buf = spdk_zmalloc(FTL_ZERO_BUFFER_SIZE, FTL_ZERO_BUFFER_SIZE, NULL,
     736             :                                       SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
     737           0 :         if (!g_ftl_read_buf) {
     738           0 :                 spdk_free(g_ftl_write_buf);
     739           0 :                 g_ftl_write_buf = NULL;
     740           0 :                 return -ENOMEM;
     741             :         }
     742           0 :         return 0;
     743             : }
     744             : 
     745             : void
     746           0 : spdk_ftl_fini(void)
     747             : {
     748           0 :         spdk_free(g_ftl_write_buf);
     749           0 :         spdk_free(g_ftl_read_buf);
     750           0 : }
     751             : 
     752             : void
     753           0 : spdk_ftl_dev_set_fast_shutdown(struct spdk_ftl_dev *dev, bool fast_shutdown)
     754             : {
     755           0 :         assert(dev);
     756           0 :         dev->conf.fast_shutdown = fast_shutdown;
     757           0 : }
     758             : 
     759             : void
     760           0 : ftl_stats_bdev_io_completed(struct spdk_ftl_dev *dev, enum ftl_stats_type type,
     761             :                             struct spdk_bdev_io *bdev_io)
     762             : {
     763           0 :         struct ftl_stats_entry *stats_entry = &dev->stats.entries[type];
     764             :         struct ftl_stats_group *stats_group;
     765           0 :         uint32_t cdw0;
     766           0 :         int sct;
     767           0 :         int sc;
     768             : 
     769           0 :         switch (bdev_io->type) {
     770           0 :         case SPDK_BDEV_IO_TYPE_READ:
     771           0 :                 stats_group = &stats_entry->read;
     772           0 :                 break;
     773           0 :         case SPDK_BDEV_IO_TYPE_WRITE:
     774             :         case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
     775           0 :                 stats_group = &stats_entry->write;
     776           0 :                 break;
     777           0 :         default:
     778           0 :                 return;
     779             :         }
     780             : 
     781           0 :         spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
     782             : 
     783           0 :         if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
     784           0 :                 stats_group->ios++;
     785           0 :                 stats_group->blocks += bdev_io->u.bdev.num_blocks;
     786           0 :         } else if (sct == SPDK_NVME_SCT_MEDIA_ERROR) {
     787           0 :                 stats_group->errors.media++;
     788             :         } else {
     789           0 :                 stats_group->errors.other++;
     790             :         }
     791             : }
     792             : 
     793             : struct spdk_io_channel *
     794           0 : spdk_ftl_get_io_channel(struct spdk_ftl_dev *dev)
     795             : {
     796           0 :         return spdk_get_io_channel(dev);
     797             : }
     798             : 
     799             : void
     800           0 : ftl_stats_crc_error(struct spdk_ftl_dev *dev, enum ftl_stats_type type)
     801             : {
     802             : 
     803           0 :         struct ftl_stats_entry *stats_entry = &dev->stats.entries[type];
     804           0 :         struct ftl_stats_group *stats_group = &stats_entry->read;
     805             : 
     806           0 :         stats_group->errors.crc++;
     807           0 : }
     808             : 
     809             : struct ftl_get_stats_ctx {
     810             :         struct spdk_ftl_dev *dev;
     811             :         struct ftl_stats *stats;
     812             :         struct spdk_thread *thread;
     813             :         spdk_ftl_stats_fn cb_fn;
     814             :         void *cb_arg;
     815             : };
     816             : 
     817             : static void
     818           0 : _ftl_get_stats_cb(void *_ctx)
     819             : {
     820           0 :         struct ftl_get_stats_ctx *stats_ctx = _ctx;
     821             : 
     822           0 :         stats_ctx->cb_fn(stats_ctx->stats, stats_ctx->cb_arg);
     823           0 :         free(stats_ctx);
     824           0 : }
     825             : 
     826             : static void
     827           0 : _ftl_get_stats(void *_ctx)
     828             : {
     829           0 :         struct ftl_get_stats_ctx *stats_ctx = _ctx;
     830             : 
     831           0 :         *stats_ctx->stats = stats_ctx->dev->stats;
     832             : 
     833           0 :         if (spdk_thread_send_msg(stats_ctx->thread, _ftl_get_stats_cb, stats_ctx)) {
     834           0 :                 ftl_abort();
     835             :         }
     836           0 : }
     837             : 
     838             : int
     839           0 : spdk_ftl_get_stats(struct spdk_ftl_dev *dev, struct ftl_stats *stats, spdk_ftl_stats_fn cb_fn,
     840             :                    void *cb_arg)
     841             : {
     842             :         struct ftl_get_stats_ctx *stats_ctx;
     843             :         int rc;
     844             : 
     845           0 :         stats_ctx = calloc(1, sizeof(struct ftl_get_stats_ctx));
     846           0 :         if (!stats_ctx) {
     847           0 :                 return -ENOMEM;
     848             :         }
     849             : 
     850           0 :         stats_ctx->dev = dev;
     851           0 :         stats_ctx->stats = stats;
     852           0 :         stats_ctx->cb_fn = cb_fn;
     853           0 :         stats_ctx->cb_arg = cb_arg;
     854           0 :         stats_ctx->thread = spdk_get_thread();
     855             : 
     856           0 :         rc = spdk_thread_send_msg(dev->core_thread, _ftl_get_stats, stats_ctx);
     857           0 :         if (rc) {
     858           0 :                 goto stats_allocated;
     859             :         }
     860             : 
     861           0 :         return 0;
     862             : 
     863           0 : stats_allocated:
     864           0 :         free(stats_ctx);
     865           0 :         return rc;
     866             : }
     867             : 
     868           1 : SPDK_LOG_REGISTER_COMPONENT(ftl_core)

Generated by: LCOV version 1.15