/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright 2023 Solidigm All Rights Reserved
 *   All rights reserved.
 */


#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/ftl.h"
#include "spdk/string.h"

#include "ftl_nv_cache.h"
#include "ftl_nv_cache_io.h"
#include "ftl_core.h"
#include "ftl_band.h"
#include "utils/ftl_addr_utils.h"
#include "mngt/ftl_mngt.h"

static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
static void compaction_process_ftl_done(struct ftl_rq *rq);
static void compaction_process_read_entry(void *arg);
static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
                                        const struct ftl_property *property,
                                        struct spdk_json_write_ctx *w);

static inline void
nvc_validate_md(struct ftl_nv_cache *nv_cache,
                struct ftl_nv_cache_chunk_md *chunk_md)
{
        struct ftl_md *md = nv_cache->md;
        void *buffer = ftl_md_get_buffer(md);
        uint64_t size = ftl_md_get_buffer_size(md);
        void *ptr = chunk_md;

        if (ptr < buffer) {
                ftl_abort();
        }

        ptr += sizeof(*chunk_md);
        if (ptr > buffer + size) {
                ftl_abort();
        }
}
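
/*
 * Descriptive note on nvc_validate_md() above: a chunk_md entry is valid only
 * if it lies entirely within the MD buffer, i.e. buffer <= chunk_md and
 * chunk_md + 1 <= buffer + size; anything else indicates broken layout math,
 * so the function aborts.
 */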

static inline uint64_t
nvc_data_offset(struct ftl_nv_cache *nv_cache)
{
        return 0;
}

static inline uint64_t
nvc_data_blocks(struct ftl_nv_cache *nv_cache)
{
        return nv_cache->chunk_blocks * nv_cache->chunk_count;
}

size_t
ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
{
        struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
                                   struct spdk_ftl_dev, nv_cache);
        return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
                                    FTL_BLOCK_SIZE);
}
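
/*
 * The tail MD holds one L2P address per data block in the chunk. With
 * illustrative values (assumptions, not necessarily the configured defaults):
 * chunk_data_blocks = 16384, addr_size = 4 and FTL_BLOCK_SIZE = 4096, the
 * tail MD takes ceil(16384 * 4 / 4096) = 16 blocks at the end of each chunk.
 */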

static size_t
nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
{
        /* Map pool element holds the whole tail md */
        return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
}

static uint64_t
get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;

        return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
}

static void
ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
{
        struct ftl_nv_cache *nvc = &dev->nv_cache;
        uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count;

        /* Start compaction when the number of full chunks exceeds the given % of all active chunks */
        nvc->chunk_compaction_threshold = usable_chunks *
                                          dev->conf.nv_cache.chunk_compaction_threshold /
                                          100;

        nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
                                     (spdk_get_ticks_hz() / 1000);

        nvc->chunk_free_target = spdk_divide_round_up(usable_chunks *
                                 dev->conf.nv_cache.chunk_free_target,
                                 100);
}
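
/*
 * Worked example with hypothetical config values: for 100 usable chunks,
 * chunk_compaction_threshold = 80 and chunk_free_target = 5 (both percent),
 * compaction becomes required once 80 chunks are full and the free-chunk
 * target becomes ceil(100 * 5 / 100) = 5 chunks. The throttle interval is
 * simply FTL_NV_CACHE_THROTTLE_INTERVAL_MS converted to TSC ticks.
 */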

struct nvc_scrub_ctx {
        uint64_t chunk_no;
        nvc_scrub_cb cb;
        void *cb_ctx;

        struct ftl_layout_region reg_chunk;
        struct ftl_md *md_chunk;
};

static int
nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
{
        while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) {
                if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
                        return 0;
                }

                /* Advance the dummy region to the next chunk */
                scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
                scrub_ctx->chunk_no++;
        }
        return -ENOENT;
}

static void
nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
        struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
        union ftl_md_vss vss;

        /* Move to the next chunk */
        scrub_ctx->chunk_no++;
        scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;

        FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
                     scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);

        if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
                /* IO error or no more active chunks found. Scrubbing finished. */
                scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
                ftl_md_destroy(scrub_ctx->md_chunk, 0);
                free(scrub_ctx);
                return;
        }

        /* Scrub the next chunk */
        vss.version.md_version = 0;
        vss.nv_cache.lba = FTL_ADDR_INVALID;

        scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
        scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;

        ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
}

void
ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
{
        struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
        union ftl_md_vss vss;

        if (!scrub_ctx) {
                cb(dev, cb_ctx, -ENOMEM);
                return;
        }

        scrub_ctx->cb = cb;
        scrub_ctx->cb_ctx = cb_ctx;

        /* Setup a dummy region for the first chunk */
        scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
        scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
        scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
        scrub_ctx->reg_chunk.current.version = 0;
        scrub_ctx->reg_chunk.current.offset = 0;
        scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
        scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
        scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
        scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
        scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
        scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;

        /* Setup an MD object for the region */
        scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
                                            scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
                                            &scrub_ctx->reg_chunk);

        if (!scrub_ctx->md_chunk) {
                free(scrub_ctx);
                cb(dev, cb_ctx, -ENOMEM);
                return;
        }

        if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
                /* No active chunks found */
                ftl_md_destroy(scrub_ctx->md_chunk, 0);
                free(scrub_ctx);
                cb(dev, cb_ctx, -ENOENT);
                return;
        }

        /* Scrub the first chunk */
        vss.version.md_version = 0;
        vss.nv_cache.lba = FTL_ADDR_INVALID;

        scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
        scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;

        ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
}
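
/*
 * A minimal caller-side sketch (hypothetical names, not part of this file):
 * scrubbing is asynchronous, so the caller supplies a completion callback:
 *
 *	static void
 *	my_scrub_done(struct spdk_ftl_dev *dev, void *cb_ctx, int status)
 *	{
 *		if (status != 0 && status != -ENOENT) {
 *			FTL_ERRLOG(dev, "NV cache scrub failed: %d\n", status);
 *		}
 *		// -ENOENT from the initial call only means there were no
 *		// active chunks to scrub
 *	}
 *
 *	ftl_nv_cache_scrub(dev, my_scrub_done, my_ctx);
 */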

int
ftl_nv_cache_init(struct spdk_ftl_dev *dev)
{
        struct ftl_nv_cache *nv_cache = &dev->nv_cache;
        struct ftl_nv_cache_chunk *chunk;
        struct ftl_nv_cache_chunk_md *md;
        struct ftl_nv_cache_compactor *compactor;
        uint64_t i, offset;

        nv_cache->halt = true;

        nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
        if (!nv_cache->md) {
                FTL_ERRLOG(dev, "No NV cache metadata object\n");
                return -1;
        }

        nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
                                               nv_cache->md_size * dev->xfer_size,
                                               FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
        if (!nv_cache->md_pool) {
                FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
                return -1;
        }

        /*
         * Initialize chunk info
         */
        nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
        nv_cache->chunk_count = dev->layout.nvc.chunk_count;
        nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);

        /* Allocate chunks */
        nv_cache->chunks = calloc(nv_cache->chunk_count,
                                  sizeof(nv_cache->chunks[0]));
        if (!nv_cache->chunks) {
                FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
                return -1;
        }

        TAILQ_INIT(&nv_cache->chunk_free_list);
        TAILQ_INIT(&nv_cache->chunk_open_list);
        TAILQ_INIT(&nv_cache->chunk_full_list);
        TAILQ_INIT(&nv_cache->chunk_comp_list);
        TAILQ_INIT(&nv_cache->chunk_inactive_list);
        TAILQ_INIT(&nv_cache->needs_free_persist_list);

        /* First chunk metadata */
        md = ftl_md_get_buffer(nv_cache->md);
        if (!md) {
                FTL_ERRLOG(dev, "No NV cache metadata\n");
                return -1;
        }

        chunk = nv_cache->chunks;
        offset = nvc_data_offset(nv_cache);
        for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
                chunk->nv_cache = nv_cache;
                chunk->md = md;
                chunk->md->version = FTL_NVC_VERSION_CURRENT;
                nvc_validate_md(nv_cache, md);
                chunk->offset = offset;
                offset += nv_cache->chunk_blocks;

                if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
                        nv_cache->chunk_free_count++;
                        TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
                } else {
                        chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
                        nv_cache->chunk_inactive_count++;
                        TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
                }
        }
        assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
        assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));

        TAILQ_INIT(&nv_cache->compactor_list);
        for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
                compactor = compactor_alloc(dev);

                if (!compactor) {
                        FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
                        return -1;
                }

                TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
        }

#define FTL_MAX_OPEN_CHUNKS 2
        nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
                                                nv_cache_p2l_map_pool_elem_size(nv_cache),
                                                FTL_BLOCK_SIZE,
                                                SPDK_ENV_SOCKET_ID_ANY);
        if (!nv_cache->p2l_pool) {
                return -ENOMEM;
        }

        /* One entry per open chunk */
        nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
                                  sizeof(struct ftl_nv_cache_chunk_md),
                                  FTL_BLOCK_SIZE,
                                  SPDK_ENV_SOCKET_ID_ANY);
        if (!nv_cache->chunk_md_pool) {
                return -ENOMEM;
        }

        /* Each compactor can be reading a different chunk which it needs to switch to the
         * free state at the end, plus one spare each for processing high-invalidity chunks
         * (if there's a backlog of chunks with extremely small, even zero, validity, the
         * compactors can process them quickly and trigger a lot of free-state updates at once) */
        nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
                                       sizeof(struct ftl_nv_cache_chunk_md),
                                       FTL_BLOCK_SIZE,
                                       SPDK_ENV_SOCKET_ID_ANY);
        if (!nv_cache->free_chunk_md_pool) {
                return -ENOMEM;
        }

        ftl_nv_cache_init_update_limits(dev);
        ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
                              NULL, true);
        return 0;
}

void
ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
{
        struct ftl_nv_cache *nv_cache = &dev->nv_cache;
        struct ftl_nv_cache_compactor *compactor;

        while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
                compactor = TAILQ_FIRST(&nv_cache->compactor_list);
                TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);

                compactor_free(dev, compactor);
        }

        ftl_mempool_destroy(nv_cache->md_pool);
        ftl_mempool_destroy(nv_cache->p2l_pool);
        ftl_mempool_destroy(nv_cache->chunk_md_pool);
        ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
        nv_cache->md_pool = NULL;
        nv_cache->p2l_pool = NULL;
        nv_cache->chunk_md_pool = NULL;
        nv_cache->free_chunk_md_pool = NULL;

        free(nv_cache->chunks);
        nv_cache->chunks = NULL;
}

static uint64_t
chunk_get_free_space(struct ftl_nv_cache *nv_cache,
                     struct ftl_nv_cache_chunk *chunk)
{
        assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
               nv_cache->chunk_blocks);
        return nv_cache->chunk_blocks - chunk->md->write_pointer -
               nv_cache->tail_md_chunk_blocks;
}

static bool
chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
{
        return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
}

static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);

static uint64_t
ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
{
        uint64_t address = FTL_LBA_INVALID;
        uint64_t num_blocks = io->num_blocks;
        uint64_t free_space;
        struct ftl_nv_cache_chunk *chunk;

        do {
                chunk = nv_cache->chunk_current;
                /* Chunk has been closed, so pick a new one */
                if (chunk && chunk_is_closed(chunk)) {
                        chunk = NULL;
                }

                if (!chunk) {
                        chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
                        if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
                                TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
                                nv_cache->chunk_current = chunk;
                        } else {
                                break;
                        }
                }

                free_space = chunk_get_free_space(nv_cache, chunk);

                if (free_space >= num_blocks) {
                        /* Enough space in chunk */

                        /* Calculate address in NV cache */
                        address = chunk->offset + chunk->md->write_pointer;

                        /* Set chunk in IO */
                        io->nv_cache_chunk = chunk;

                        /* Move write pointer */
                        chunk->md->write_pointer += num_blocks;
                        break;
                }

                /* Not enough space in nv_cache_chunk */
                nv_cache->chunk_current = NULL;

                if (0 == free_space) {
                        continue;
                }

                chunk->md->blocks_skipped = free_space;
                chunk->md->blocks_written += free_space;
                chunk->md->write_pointer += free_space;

                if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
                        ftl_chunk_close(chunk);
                }
        } while (1);

        return address;
}
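
/*
 * Note on the loop above: when the current chunk cannot fit the whole IO,
 * the remaining user space is accounted as skipped padding, the write
 * pointer advances to the tail-MD boundary (closing the chunk), and the
 * next open chunk is tried. FTL_LBA_INVALID is returned only when no chunk
 * in FTL_CHUNK_STATE_OPEN is available.
 */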

void
ftl_nv_cache_fill_md(struct ftl_io *io)
{
        struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
        uint64_t i;
        union ftl_md_vss *metadata = io->md;
        uint64_t lba = ftl_io_get_lba(io, 0);

        for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
                metadata->nv_cache.lba = lba;
                metadata->nv_cache.seq_id = chunk->md->seq_id;
        }
}

uint64_t
chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
{
        return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
}

static void
chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
                     uint64_t advanced_blocks)
{
        chunk->md->blocks_written += advanced_blocks;

        assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);

        if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
                ftl_chunk_close(chunk);
        }
}

static uint64_t
chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
{
        return chunk->md->blocks_written - chunk->md->blocks_skipped -
               chunk->nv_cache->tail_md_chunk_blocks;
}
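
/*
 * Accounting identity used throughout compaction: blocks_written counts
 * everything appended to a chunk, so user data equals blocks_written minus
 * blocks_skipped (padding) minus tail_md_chunk_blocks (the P2L map stored
 * at the end of the chunk).
 */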

static bool
is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
{
        assert(chunk->md->blocks_written != 0);

        if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
                return true;
        }

        return false;
}

static int
ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_nv_cache *nv_cache = chunk->nv_cache;
        struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

        p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);

        if (!p2l_map->chunk_dma_md) {
                return -ENOMEM;
        }

        ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
        return 0;
}

static void
ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

        ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
        p2l_map->chunk_dma_md = NULL;
}

static void
ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_nv_cache *nv_cache = chunk->nv_cache;

        /* Reset chunk */
        ftl_nv_cache_chunk_md_initialize(chunk->md);

        TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
        nv_cache->chunk_free_persist_count++;
}

static int
ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_nv_cache *nv_cache = chunk->nv_cache;
        struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

        p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
        if (!p2l_map->chunk_dma_md) {
                return -ENOMEM;
        }

        ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
        return 0;
}

static void
ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

        ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
        p2l_map->chunk_dma_md = NULL;
}

static void
chunk_free_cb(int status, void *ctx)
{
        struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;

        if (spdk_likely(!status)) {
                struct ftl_nv_cache *nv_cache = chunk->nv_cache;

                nv_cache->chunk_free_persist_count--;
                TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
                nv_cache->chunk_free_count++;
                nv_cache->chunk_full_count--;
                chunk->md->state = FTL_CHUNK_STATE_FREE;
                chunk->md->close_seq_id = 0;
                ftl_chunk_free_chunk_free_entry(chunk);
        } else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
                ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
#else
                ftl_abort();
#endif
        }
}

static void
ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
{
        int rc;
        struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
        struct ftl_p2l_map *p2l_map;
        struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
        struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
        struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;

        TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
                p2l_map = &chunk->p2l_map;
                rc = ftl_chunk_alloc_chunk_free_entry(chunk);
                if (rc) {
                        break;
                }

                TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);

                memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
                p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
                p2l_map->chunk_dma_md->close_seq_id = 0;
                p2l_map->chunk_dma_md->p2l_map_checksum = 0;

                ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
                                       chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
        }
}

static void
compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
{
        struct ftl_nv_cache *nv_cache = chunk->nv_cache;
        struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
        double *ptr;

        if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
                return;
        }

        if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
                ptr = compaction_bw->buf + compaction_bw->first;
                compaction_bw->first++;
                if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
                        compaction_bw->first = 0;
                }
                compaction_bw->sum -= *ptr;
        } else {
                ptr = compaction_bw->buf + compaction_bw->count;
                compaction_bw->count++;
        }

        *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
        chunk->compaction_length_tsc = 0;

        compaction_bw->sum += *ptr;
        nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
}
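
/*
 * compaction_stats_update() maintains a simple moving average over the last
 * FTL_NV_CACHE_COMPACTION_SMA_N compacted chunks: buf is a ring buffer of
 * per-chunk bandwidth samples (bytes compacted divided by TSC ticks spent),
 * sum is updated incrementally, and compaction_sma = sum / count.
 */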

static void
chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
{
        struct ftl_nv_cache *nv_cache = chunk->nv_cache;
        uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());

        chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
        chunk->compaction_start_tsc = tsc;

        chunk->md->blocks_compacted += num_blocks;
        assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
        if (!is_chunk_compacted(chunk)) {
                return;
        }

        /* Remove chunk from compacted list */
        TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
        nv_cache->chunk_comp_count--;

        compaction_stats_update(chunk);

        ftl_chunk_free(chunk);
}

static bool
is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
{
        struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);

        if (dev->conf.prep_upgrade_on_shutdown) {
                if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
                        return true;
                }
        }

        return false;
}

static bool
is_compaction_required(struct ftl_nv_cache *nv_cache)
{
        if (spdk_unlikely(nv_cache->halt)) {
                return is_compaction_required_for_upgrade(nv_cache);
        }

        if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
                return true;
        }

        return false;
}

static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);

static void
_compaction_process_pin_lba(void *_comp)
{
        struct ftl_nv_cache_compactor *comp = _comp;

        compaction_process_pin_lba(comp);
}

static void
compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
        struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
        struct ftl_rq *rq = comp->rq;

        if (status) {
                rq->iter.status = status;
                pin_ctx->lba = FTL_LBA_INVALID;
        }

        if (--rq->iter.remaining == 0) {
                if (rq->iter.status) {
                        /* unpin and try again */
                        ftl_rq_unpin(rq);
                        spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
                        return;
                }

                compaction_process_finish_read(comp);
        }
}

static void
compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
{
        struct ftl_rq *rq = comp->rq;
        struct spdk_ftl_dev *dev = rq->dev;
        struct ftl_rq_entry *entry;

        assert(rq->iter.count);
        rq->iter.remaining = rq->iter.count;
        rq->iter.status = 0;

        FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
                struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
                struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
                union ftl_md_vss *md = entry->io_md;

                if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
                        ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
                } else {
                        ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
                }
        }
}

static void
compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
        struct ftl_rq_entry *entry = arg;
        struct ftl_rq *rq = ftl_rq_from_entry(entry);
        struct spdk_ftl_dev *dev = rq->dev;
        struct ftl_nv_cache_compactor *compactor = rq->owner.priv;

        ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);

        spdk_bdev_free_io(bdev_io);

        if (!success) {
                /* retry */
                spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
                return;
        }

        assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
        rq->iter.remaining -= entry->bdev_io.num_blocks;
        if (0 == rq->iter.remaining) {
                /* All IOs processed, go to the next phase - pinning */
                compaction_process_pin_lba(compactor);
        }
}

static void
compaction_process_read_entry(void *arg)
{
        struct ftl_rq_entry *entry = arg;
        struct ftl_rq *rq = ftl_rq_from_entry(entry);
        struct spdk_ftl_dev *dev = rq->dev;

        int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
                        dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
                        entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
                        compaction_process_read_entry_cb, entry);

        if (spdk_unlikely(rc)) {
                if (rc == -ENOMEM) {
                        struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
                        entry->bdev_io.wait_entry.bdev = bdev;
                        entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
                        entry->bdev_io.wait_entry.cb_arg = entry;
                        spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
                } else {
                        ftl_abort();
                }
        }

        dev->stats.io_activity_total += entry->bdev_io.num_blocks;
}
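
/*
 * Read retry policy above: -ENOMEM from the bdev layer only means no bdev_io
 * was available, so the request is parked via spdk_bdev_queue_io_wait() and
 * resubmitted through compaction_process_read_entry() once resources free
 * up; any other submission error is treated as fatal.
 */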

static bool
is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
{
        assert(chunk->md->blocks_written != 0);

        if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
                return false;
        }

        return true;
}

static struct ftl_nv_cache_chunk *
get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
{
        struct ftl_nv_cache_chunk *chunk = NULL;

        if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
                chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
                if (is_chunk_to_read(chunk)) {
                        return chunk;
                }
        }

        if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
                chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
                TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);

                assert(chunk->md->write_pointer);
        } else {
                return NULL;
        }

        if (spdk_likely(chunk)) {
                assert(chunk->md->write_pointer != 0);
                TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
                nv_cache->chunk_comp_count++;
        }

        return chunk;
}

static uint64_t
chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
{
        uint64_t blocks_written;
        uint64_t blocks_to_read;

        assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
        blocks_written = chunk_user_blocks_written(chunk);

        assert(blocks_written >= chunk->md->read_pointer);
        blocks_to_read = blocks_written - chunk->md->read_pointer;

        return blocks_to_read;
}

static void
compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
{
        struct ftl_nv_cache *nv_cache = compactor->nv_cache;

        compactor->rq->iter.count = 0;
        assert(nv_cache->compaction_active_count);
        nv_cache->compaction_active_count--;
        TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
}

static void
compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
{
        entry->addr = FTL_ADDR_INVALID;
        entry->lba = FTL_LBA_INVALID;
        entry->seq_id = 0;
        entry->owner.priv = NULL;
}

static void
compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
{
        struct ftl_rq *rq = compactor->rq;
        struct ftl_rq_entry *entry;

        assert(idx < rq->num_blocks);
        FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
                compaction_process_invalidate_entry(entry);
        }
}

static void
compaction_process_read(struct ftl_nv_cache_compactor *compactor)
{
        struct ftl_rq *rq = compactor->rq;
        struct ftl_nv_cache *nv_cache = compactor->nv_cache;
        struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
        struct ftl_rq_entry *entry, *io;

        assert(rq->iter.count);
        rq->iter.remaining = rq->iter.count;

        io = rq->entries;
        io->bdev_io.num_blocks = 1;
        io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
        FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
                if (entry->addr == io->addr + io->bdev_io.num_blocks) {
                        io->bdev_io.num_blocks++;
                } else {
                        compaction_process_read_entry(io);
                        io = entry;
                        io->bdev_io.num_blocks = 1;
                        io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
                }
        }
        compaction_process_read_entry(io);
}
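
/*
 * compaction_process_read() coalesces physically contiguous entries into a
 * single bdev read. For example (hypothetical addresses), entries at NVC
 * offsets 10, 11, 12 and 20 produce two IOs: a 3-block read at offset 10
 * and a 1-block read at offset 20.
 */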

static ftl_addr
compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
{
        ftl_addr start, pos;
        uint64_t skip, to_read = chunk_blocks_to_read(chunk);

        if (0 == to_read) {
                return FTL_ADDR_INVALID;
        }

        start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
        pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);

        if (pos == UINT64_MAX) {
                chunk->md->read_pointer += to_read;
                chunk_compaction_advance(chunk, to_read);
                return FTL_ADDR_INVALID;
        }

        assert(pos >= start);
        skip = pos - start;
        if (skip) {
                chunk->md->read_pointer += skip;
                chunk_compaction_advance(chunk, skip);
        }

        return pos;
}
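
/*
 * Only blocks still set in dev->valid_map need relocation: if the whole
 * remaining span is invalid, the read pointer and compaction progress jump
 * over it without issuing any read; a leading invalid run is skipped the
 * same way before the first valid position is returned.
 */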
     946             : 
     947             : static bool
     948           0 : compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
     949             : {
     950           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     951           0 :         struct ftl_nv_cache_chunk *chunk = NULL;
     952           0 :         ftl_addr addr = FTL_ADDR_INVALID;
     953             : 
     954           0 :         while (!chunk) {
     955             :                 /* Get currently handled chunk */
     956           0 :                 chunk = get_chunk_for_compaction(nv_cache);
     957           0 :                 if (!chunk) {
     958           0 :                         return false;
     959             :                 }
     960           0 :                 chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
     961             : 
     962             :                 /* Get next read position in chunk */
     963           0 :                 addr = compaction_chunk_read_pos(dev, chunk);
     964           0 :                 if (FTL_ADDR_INVALID == addr) {
     965           0 :                         chunk = NULL;
     966             :                 }
     967             :         }
     968             : 
     969           0 :         assert(FTL_ADDR_INVALID != addr);
     970             : 
     971             :         /* Set entry address info and chunk */
     972           0 :         entry->addr = addr;
     973           0 :         entry->owner.priv = chunk;
     974             : 
     975             :         /* Move read pointer in the chunk */
     976           0 :         chunk->md->read_pointer++;
     977             : 
     978           0 :         return true;
     979             : }
     980             : 
     981             : static void
     982           0 : compaction_process_start(struct ftl_nv_cache_compactor *compactor)
     983             : {
     984           0 :         struct ftl_rq *rq = compactor->rq;
     985           0 :         struct ftl_nv_cache *nv_cache = compactor->nv_cache;
     986             :         struct ftl_rq_entry *entry;
     987             : 
     988           0 :         assert(0 == compactor->rq->iter.count);
     989           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
     990           0 :                 if (!compaction_entry_read_pos(nv_cache, entry)) {
     991           0 :                         compaction_process_pad(compactor, entry->index);
     992           0 :                         break;
     993             :                 }
     994           0 :                 rq->iter.count++;
     995             :         }
     996             : 
     997           0 :         if (rq->iter.count) {
     998             :                 /* Schedule read I/Os */

     999           0 :                 compaction_process_read(compactor);
    1000             :         } else {
    1001           0 :                 compactor_deactivate(compactor);
    1002             :         }
    1003           0 : }
    1004             : 
    1005             : static void
    1006           0 : compaction_process(struct ftl_nv_cache *nv_cache)
    1007             : {
    1008           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1009             :         struct ftl_nv_cache_compactor *compactor;
    1010             : 
    1011           0 :         if (!is_compaction_required(nv_cache)) {
    1012           0 :                 return;
    1013             :         }
    1014             : 
    1015           0 :         compactor = TAILQ_FIRST(&nv_cache->compactor_list);
    1016           0 :         if (!compactor) {
    1017           0 :                 return;
    1018             :         }
    1019             : 
    1020           0 :         TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
    1021           0 :         compactor->nv_cache->compaction_active_count++;
    1022           0 :         compaction_process_start(compactor);
    1023           0 :         ftl_add_io_activity(dev);
    1024             : }
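
/*
 * The compactor pool above is a plain TAILQ free list: compaction_process()
 * pops an idle compactor and bumps compaction_active_count, and
 * compactor_deactivate() is expected to undo both once the request retires.
 * A hedged sketch of that put side, using only fields referenced above:
 */
static void
sketch_compactor_put(struct ftl_nv_cache *nv_cache,
		     struct ftl_nv_cache_compactor *compactor)
{
	assert(nv_cache->compaction_active_count > 0);
	nv_cache->compaction_active_count--;
	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
}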
    1025             : 
    1026             : static void
    1027           0 : compaction_process_ftl_done(struct ftl_rq *rq)
    1028             : {
    1029           0 :         struct spdk_ftl_dev *dev = rq->dev;
    1030           0 :         struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
    1031           0 :         struct ftl_band *band = rq->io.band;
    1032             :         struct ftl_rq_entry *entry;
    1033             :         ftl_addr addr;
    1034             : 
    1035           0 :         if (spdk_unlikely(false == rq->success)) {
    1036             :                 /* I/O error, retry the write */
    1037             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1038             :                 ftl_writer_queue_rq(&dev->writer_user, rq);
    1039             :                 return;
    1040             : #else
    1041           0 :                 ftl_abort();
    1042             : #endif
    1043             :         }
    1044             : 
    1045           0 :         assert(rq->iter.count);
    1046             : 
    1047             :         /* Update L2P table */
    1048           0 :         addr = rq->io.addr;
    1049           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
    1050           0 :                 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
    1051             : 
    1052           0 :                 if (entry->lba != FTL_LBA_INVALID) {
    1053           0 :                         ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
    1054           0 :                         ftl_l2p_unpin(dev, entry->lba, 1);
    1055           0 :                         chunk_compaction_advance(chunk, 1);
    1056             :                 } else {
    1057           0 :                         assert(entry->addr == FTL_ADDR_INVALID);
    1058             :                 }
    1059             : 
    1060           0 :                 addr = ftl_band_next_addr(band, addr, 1);
    1061           0 :                 compaction_process_invalidate_entry(entry);
    1062             :         }
    1063             : 
    1064           0 :         compactor_deactivate(compactor);
    1065           0 : }
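
/*
 * ftl_l2p_update_base() above is handed both the new base address and the old
 * cache address. A plausible reading (a sketch, not the actual implementation)
 * is a conditional update that leaves the mapping untouched when a newer user
 * write has already redirected the LBA:
 */
static void
sketch_l2p_update_if_unchanged(struct spdk_ftl_dev *dev, uint64_t lba,
			       ftl_addr new_addr, ftl_addr old_addr)
{
	if (ftl_l2p_get(dev, lba) == old_addr) {
		ftl_l2p_set(dev, lba, new_addr);
	}
}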
    1066             : 
    1067             : static void
    1068           0 : compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
    1069             : {
    1070           0 :         struct ftl_rq *rq = compactor->rq;
    1071           0 :         struct spdk_ftl_dev *dev = rq->dev;
    1072             :         struct ftl_rq_entry *entry;
    1073             :         ftl_addr current_addr;
    1074           0 :         uint64_t skip = 0;
    1075             : 
    1076           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
    1077           0 :                 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
    1078           0 :                 union ftl_md_vss *md = entry->io_md;
    1079             : 
    1080           0 :                 if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
    1081           0 :                         skip++;
    1082           0 :                         compaction_process_invalidate_entry(entry);
    1083           0 :                         chunk_compaction_advance(chunk, 1);
    1084           0 :                         continue;
    1085             :                 }
    1086             : 
    1087           0 :                 current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
    1088           0 :                 if (current_addr == entry->addr) {
    1089           0 :                         entry->lba = md->nv_cache.lba;
    1090           0 :                         entry->seq_id = chunk->md->seq_id;
    1091             :                 } else {
    1092             :                         /* This address was already invalidated, just skip this block */
    1093           0 :                         chunk_compaction_advance(chunk, 1);
    1094           0 :                         ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
    1095           0 :                         compaction_process_invalidate_entry(entry);
    1096           0 :                         skip++;
    1097             :                 }
    1098             :         }
    1099             : 
    1100           0 :         if (skip < rq->iter.count) {
    1101             :                 /*
    1102             :                  * The request contains data to be moved to the base device, compact it
    1103             :                  */
    1104           0 :                 ftl_writer_queue_rq(&dev->writer_user, rq);
    1105             :         } else {
    1106           0 :                 compactor_deactivate(compactor);
    1107             :         }
    1108           0 : }
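
/*
 * The filtering above reduces to a single predicate per block; a compact
 * restatement (same three checks, same order, only fields referenced above):
 */
static bool
sketch_block_still_valid(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk,
			 struct ftl_rq_entry *entry)
{
	union ftl_md_vss *md = entry->io_md;

	return md->nv_cache.lba != FTL_LBA_INVALID &&
	       md->nv_cache.seq_id == chunk->md->seq_id &&
	       ftl_l2p_get(dev, md->nv_cache.lba) == entry->addr;
}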
    1109             : 
    1110             : static void
    1111           0 : compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
    1112             : {
    1113           0 :         if (!compactor) {
    1114           0 :                 return;
    1115             :         }
    1116             : 
    1117           0 :         ftl_rq_del(compactor->rq);
    1118           0 :         free(compactor);
    1119             : }
    1120             : 
    1121             : static struct ftl_nv_cache_compactor *
    1122           0 : compactor_alloc(struct spdk_ftl_dev *dev)
    1123             : {
    1124             :         struct ftl_nv_cache_compactor *compactor;
    1125             :         struct ftl_rq_entry *entry;
    1126             : 
    1127           0 :         compactor = calloc(1, sizeof(*compactor));
    1128           0 :         if (!compactor) {
    1129           0 :                 goto error;
    1130             :         }
    1131             : 
    1132             :         /* Allocate a helper request for reading */
    1133           0 :         compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
    1134           0 :         if (!compactor->rq) {
    1135           0 :                 goto error;
    1136             :         }
    1137             : 
    1138           0 :         compactor->nv_cache = &dev->nv_cache;
    1139           0 :         compactor->rq->owner.priv = compactor;
    1140           0 :         compactor->rq->owner.cb = compaction_process_ftl_done;
    1141           0 :         compactor->rq->owner.compaction = true;
    1142             : 
    1143           0 :         FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
    1144           0 :                 compaction_process_invalidate_entry(entry);
    1145             :         }
    1146             : 
    1147           0 :         return compactor;
    1148             : 
    1149           0 : error:
    1150           0 :         compactor_free(dev, compactor);
    1151           0 :         return NULL;
    1152             : }
    1153             : 
    1154             : static void
    1155           0 : ftl_nv_cache_submit_cb_done(struct ftl_io *io)
    1156             : {
    1157           0 :         struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
    1158             : 
    1159           0 :         chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
    1160           0 :         io->nv_cache_chunk = NULL;
    1161             : 
    1162           0 :         ftl_mempool_put(nv_cache->md_pool, io->md);
    1163           0 :         ftl_io_complete(io);
    1164           0 : }
    1165             : 
    1166             : static void
    1167           0 : ftl_nv_cache_l2p_update(struct ftl_io *io)
    1168             : {
    1169           0 :         struct spdk_ftl_dev *dev = io->dev;
    1170           0 :         ftl_addr next_addr = io->addr;
    1171             :         size_t i;
    1172             : 
    1173           0 :         for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
    1174           0 :                 ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
    1175             :         }
    1176             : 
    1177           0 :         ftl_l2p_unpin(dev, io->lba, io->num_blocks);
    1178           0 :         ftl_nv_cache_submit_cb_done(io);
    1179           0 : }
    1180             : 
    1181             : static void
    1182           0 : ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    1183             : {
    1184           0 :         struct ftl_io *io = cb_arg;
    1185             : 
    1186           0 :         ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
    1187             : 
    1188           0 :         spdk_bdev_free_io(bdev_io);
    1189             : 
    1190           0 :         if (spdk_unlikely(!success)) {
    1191           0 :                 FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
    1192             :                            io->addr);
    1193           0 :                 io->status = -EIO;
    1194           0 :                 ftl_nv_cache_submit_cb_done(io);
    1195             :         } else {
    1196           0 :                 ftl_nv_cache_l2p_update(io);
    1197             :         }
    1198           0 : }
    1199             : 
    1200             : static void
    1201           0 : nv_cache_write(void *_io)
    1202             : {
    1203           0 :         struct ftl_io *io = _io;
    1204           0 :         struct spdk_ftl_dev *dev = io->dev;
    1205           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1206             :         int rc;
    1207             : 
    1208           0 :         rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
    1209           0 :                                              io->iov, io->iov_cnt, io->md,
    1210             :                                              ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
    1211             :                                              ftl_nv_cache_submit_cb, io);
    1212           0 :         if (spdk_unlikely(rc)) {
    1213           0 :                 if (rc == -ENOMEM) {
    1214           0 :                         struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
    1215           0 :                         io->bdev_io_wait.bdev = bdev;
    1216           0 :                         io->bdev_io_wait.cb_fn = nv_cache_write;
    1217           0 :                         io->bdev_io_wait.cb_arg = io;
    1218           0 :                         spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
    1219             :                 } else {
    1220           0 :                         ftl_abort();
    1221             :                 }
    1222             :         }
    1223           0 : }
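
/*
 * The -ENOMEM branch above is the standard SPDK bdev back-pressure idiom:
 * park an spdk_bdev_io_wait_entry and resubmit from its callback once a
 * bdev_io becomes available again. A generic sketch of the same pattern,
 * assuming only the public spdk_bdev_queue_io_wait() contract (the ctx type
 * and names are illustrative):
 */
struct sketch_retry_ctx {
	struct spdk_bdev_io_wait_entry	wait;
	void				(*resubmit)(void *arg);
	void				*arg;
};

static void
sketch_io_wait_cb(void *cb_arg)
{
	struct sketch_retry_ctx *ctx = cb_arg;

	ctx->resubmit(ctx->arg);
}

static void
sketch_queue_retry(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		   struct sketch_retry_ctx *ctx)
{
	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);

	ctx->wait.bdev = bdev;
	ctx->wait.cb_fn = sketch_io_wait_cb;
	ctx->wait.cb_arg = ctx;
	/* Like the code above, the return code is not checked here */
	spdk_bdev_queue_io_wait(bdev, ch, &ctx->wait);
}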
    1224             : 
    1225             : static void
    1226           0 : ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
    1227             : {
    1228           0 :         struct ftl_io *io = pin_ctx->cb_ctx;
    1229             :         size_t i;
    1230             : 
    1231           0 :         if (spdk_unlikely(status != 0)) {
    1232             :                 /* Retry on the internal L2P fault */
    1233           0 :         FTL_ERRLOG(dev, "Cannot pin LBA for NV cache write, failed at %"PRIx64"\n",
    1234             :                            io->addr);
    1235           0 :                 io->status = -EAGAIN;
    1236           0 :                 ftl_nv_cache_submit_cb_done(io);
    1237           0 :                 return;
    1238             :         }
    1239             : 
    1240             :         /* Remember the previous L2P mapping to resolve conflicts in case of an outstanding write-after-write */
    1241           0 :         for (i = 0; i < io->num_blocks; ++i) {
    1242           0 :                 io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
    1243             :         }
    1244             : 
    1245           0 :         assert(io->iov_pos == 0);
    1246             : 
    1247           0 :         ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
    1248             : 
    1249           0 :         nv_cache_write(io);
    1250             : }
    1251             : 
    1252             : bool
    1253           0 : ftl_nv_cache_write(struct ftl_io *io)
    1254             : {
    1255           0 :         struct spdk_ftl_dev *dev = io->dev;
    1256             :         uint64_t cache_offset;
    1257             : 
    1258           0 :         io->md = ftl_mempool_get(dev->nv_cache.md_pool);
    1259           0 :         if (spdk_unlikely(!io->md)) {
    1260           0 :                 return false;
    1261             :         }
    1262             : 
    1263             :         /* Reserve an area in the write buffer cache */
    1264           0 :         cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
    1265           0 :         if (cache_offset == FTL_LBA_INVALID) {
    1266             :                 /* No free space in NV cache, resubmit request */
    1267           0 :                 ftl_mempool_put(dev->nv_cache.md_pool, io->md);
    1268           0 :                 return false;
    1269             :         }
    1270           0 :         io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
    1271           0 :         io->nv_cache_chunk = dev->nv_cache.chunk_current;
    1272             : 
    1273           0 :         ftl_nv_cache_fill_md(io);
    1274           0 :         ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
    1275             :                     ftl_nv_cache_pin_cb, io,
    1276             :                     &io->l2p_pin_ctx);
    1277             : 
    1278           0 :         dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
    1279             : 
    1280           0 :         return true;
    1281             : }
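
/*
 * Ordering in ftl_nv_cache_write() above, spelled out: (1) take an md buffer,
 * (2) reserve space in the current chunk, (3) pin the LBA range in the L2P so
 * the mapping cannot move underneath the write, (4) snapshot the previous
 * mappings (see ftl_nv_cache_pin_cb), (5) issue the bdev write, and only after
 * completion (6) update the L2P and unpin. The L2P therefore never points at
 * cache blocks whose data has not yet been persisted.
 */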
    1282             : 
    1283             : int
    1284           0 : ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
    1285             :                   spdk_bdev_io_completion_cb cb, void *cb_arg)
    1286             : {
    1287             :         int rc;
    1288           0 :         struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
    1289             : 
    1290           0 :         assert(ftl_addr_in_nvc(io->dev, addr));
    1291             : 
    1292           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1293           0 :                         ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
    1294             :                         num_blocks, cb, cb_arg);
    1295             : 
    1296           0 :         return rc;
    1297             : }
    1298             : 
    1299             : bool
    1300           0 : ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
    1301             : {
    1302           0 :         if (nv_cache->compaction_active_count) {
    1303           0 :                 return false;
    1304             :         }
    1305             : 
    1306           0 :         if (nv_cache->chunk_open_count > 0) {
    1307           0 :                 return false;
    1308             :         }
    1309             : 
    1310           0 :         if (is_compaction_required_for_upgrade(nv_cache)) {
    1311           0 :                 return false;
    1312             :         }
    1313             : 
    1314           0 :         return true;
    1315             : }
    1316             : 
    1317             : void
    1318           0 : ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
    1319             :                       uint64_t offset, uint64_t lba)
    1320             : {
    1321           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1322           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1323             : 
    1324           0 :         ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
    1325           0 : }
    1326             : 
    1327             : uint64_t
    1328           0 : ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
    1329             : {
    1330           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1331           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1332             : 
    1333           0 :         return ftl_lba_load(dev, p2l_map->chunk_map, offset);
    1334             : }
    1335             : 
    1336             : static void
    1337           0 : ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
    1338             : {
    1339           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1340           0 :         uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
    1341             :         uint64_t offset;
    1342             : 
    1343           0 :         offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
    1344           0 :         ftl_chunk_map_set_lba(chunk, offset, lba);
    1345           0 : }
    1346             : 
    1347             : struct ftl_nv_cache_chunk *
    1348           0 : ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
    1349             : {
    1350           0 :         struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
    1351             :         uint64_t chunk_idx;
    1352           0 :         uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
    1353             : 
    1354           0 :         assert(chunk != NULL);
    1355           0 :         chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
    1356           0 :         chunk += chunk_idx;
    1357             : 
    1358           0 :         return chunk;
    1359             : }
    1360             : 
    1361             : void
    1362           0 : ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
    1363             : {
    1364             :         struct ftl_nv_cache_chunk *chunk;
    1365             : 
    1366           0 :         chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
    1367             : 
    1368           0 :         assert(lba != FTL_LBA_INVALID);
    1369             : 
    1370           0 :         ftl_chunk_set_addr(chunk, lba, addr);
    1371           0 :         ftl_bitmap_set(dev->valid_map, addr);
    1372           0 : }
    1373             : 
    1374             : static void
    1375           0 : ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
    1376             : {
    1377             :         double err;
    1378             :         double modifier;
    1379             : 
    1380           0 :         err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
    1381           0 :         modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
    1382             : 
    1383           0 :         if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
    1384           0 :                 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
    1385           0 :         } else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
    1386           0 :                 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
    1387             :         }
    1388             : 
    1389           0 :         if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
    1390           0 :                 nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
    1391             :         } else {
    1392           0 :                 double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
    1393             :                                              FTL_BLOCK_SIZE;
    1394           0 :                 nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
    1395             :         }
    1396           0 : }
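
/*
 * The throttle above is a proportional-only controller: err is the signed
 * distance of the free-chunk count from its target (normalized by the total
 * chunk count), and the clamped modifier scales a moving average of compaction
 * bandwidth into a per-interval submission budget. A sketch of the same math
 * in one helper (compaction_sma is assumed to be in bytes per tsc tick):
 */
static uint64_t
sketch_throttle_limit(double compaction_sma, uint64_t interval_tsc, double modifier)
{
	double blocks_per_interval = compaction_sma * interval_tsc / FTL_BLOCK_SIZE;

	return (uint64_t)(blocks_per_interval * (1.0 + modifier));
}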
    1397             : 
    1398             : static void
    1399           0 : ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
    1400             : {
    1401           0 :         uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
    1402             : 
    1403           0 :         if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
    1404           0 :                 nv_cache->throttle.start_tsc = tsc;
    1405           0 :         } else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
    1406           0 :                 ftl_nv_cache_throttle_update(nv_cache);
    1407           0 :                 nv_cache->throttle.start_tsc = tsc;
    1408           0 :                 nv_cache->throttle.blocks_submitted = 0;
    1409             :         }
    1410           0 : }
    1411             : 
    1412             : static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
    1413             : 
    1414             : void
    1415           0 : ftl_nv_cache_process(struct spdk_ftl_dev *dev)
    1416             : {
    1417           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1418             : 
    1419           0 :         assert(dev->nv_cache.bdev_desc);
    1420             : 
    1421           0 :         if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
    1422           0 :             !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
    1423           0 :                 struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
    1424           0 :                 TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
    1425           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
    1426           0 :                 nv_cache->chunk_free_count--;
    1427           0 :                 chunk->md->seq_id = ftl_get_next_seq_id(dev);
    1428           0 :                 ftl_chunk_open(chunk);
    1429           0 :                 ftl_add_io_activity(dev);
    1430             :         }
    1431             : 
    1432           0 :         compaction_process(nv_cache);
    1433           0 :         ftl_chunk_persist_free_state(nv_cache);
    1434           0 :         ftl_nv_cache_process_throttle(nv_cache);
    1435           0 : }
    1436             : 
    1437             : static bool
    1438           0 : ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
    1439             : {
    1440           0 :         if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
    1441           0 :                 return true;
    1442             :         } else {
    1443           0 :                 return false;
    1444             :         }
    1445             : }
    1446             : 
    1447             : bool
    1448           0 : ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
    1449             : {
    1450           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1451             : 
    1452           0 :         if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
    1453           0 :             ftl_nv_cache_full(nv_cache)) {
    1454           0 :                 return true;
    1455             :         }
    1456             : 
    1457           0 :         return false;
    1458             : }
    1459             : 
    1460             : static void
    1461           0 : chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
    1462             : {
    1463           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1464           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1465             : 
    1466           0 :         ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
    1467           0 :         p2l_map->chunk_map = NULL;
    1468             : 
    1469           0 :         ftl_chunk_free_md_entry(chunk);
    1470           0 : }
    1471             : 
    1472             : int
    1473           0 : ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
    1474             : {
    1475           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1476             :         struct ftl_nv_cache_chunk *chunk;
    1477           0 :         int status = 0;
    1478             :         uint64_t i;
    1479             : 
    1480           0 :         assert(nv_cache->chunk_open_count == 0);
    1481             : 
    1482           0 :         if (nv_cache->compaction_active_count) {
    1483           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
    1484           0 :                 return -EINVAL;
    1485             :         }
    1486             : 
    1487           0 :         chunk = nv_cache->chunks;
    1488           0 :         if (!chunk) {
    1489           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
    1490           0 :                 return -ENOMEM;
    1491             :         }
    1492             : 
    1493           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1494           0 :                 nvc_validate_md(nv_cache, chunk->md);
    1495             : 
    1496           0 :                 if (chunk->md->read_pointer) {
    1497             :                         /* Only full chunks can be compacted */
    1498           0 :                         if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
    1499           0 :                                 assert(0);
    1500             :                                 status = -EINVAL;
    1501             :                                 break;
    1502             :                         }
    1503             : 
    1504             :                         /*
    1505             :                          * Chunk was in the middle of compaction; start over after load
    1507             :                          */
    1508           0 :                         chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
    1509           0 :                 } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
    1510             :                         /* Full chunk */
    1511           0 :                 } else if (0 == chunk->md->blocks_written) {
    1512             :                         /* Empty chunk */
    1513             :                 } else {
    1514           0 :                         assert(0);
    1515             :                         status = -EINVAL;
    1516             :                         break;
    1517             :                 }
    1518             :         }
    1519             : 
    1520           0 :         if (status) {
    1521           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
    1522             :                            "metadata\n");
    1523             :         }
    1524             : 
    1525           0 :         return status;
    1526             : }
    1527             : 
    1528             : static int
    1529           0 : sort_chunks_cmp(const void *a, const void *b)
    1530             : {
    1531           0 :         struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
    1532           0 :         struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
    1533             : 
    1534           0 :         return (a_chunk->md->seq_id > b_chunk->md->seq_id) - (a_chunk->md->seq_id < b_chunk->md->seq_id);
    1535             : }
    1536             : 
    1537             : static int
    1538           0 : sort_chunks(struct ftl_nv_cache *nv_cache)
    1539             : {
    1540             :         struct ftl_nv_cache_chunk **chunks_list;
    1541             :         struct ftl_nv_cache_chunk *chunk;
    1542             :         uint32_t i;
    1543             : 
    1544           0 :         if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
    1545           0 :                 return 0;
    1546             :         }
    1547             : 
    1548           0 :         chunks_list = calloc(nv_cache->chunk_full_count,
    1549             :                              sizeof(chunks_list[0]));
    1550           0 :         if (!chunks_list) {
    1551           0 :                 return -ENOMEM;
    1552             :         }
    1553             : 
    1554           0 :         i = 0;
    1555           0 :         TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
    1556           0 :                 chunks_list[i] = chunk;
    1557           0 :                 i++;
    1558             :         }
    1559           0 :         assert(i == nv_cache->chunk_full_count);
    1560             : 
    1561           0 :         qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
    1562             :               sort_chunks_cmp);
    1563             : 
    1564           0 :         TAILQ_INIT(&nv_cache->chunk_full_list);
    1565           0 :         for (i = 0; i < nv_cache->chunk_full_count; i++) {
    1566           0 :                 chunk = chunks_list[i];
    1567           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
    1568             :         }
    1569             : 
    1570           0 :         free(chunks_list);
    1571           0 :         return 0;
    1572             : }
    1573             : 
    1574             : static int
    1575           0 : chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
    1576             : {
    1577           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1578           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1579             : 
    1580           0 :         assert(p2l_map->ref_cnt == 0);
    1581           0 :         assert(p2l_map->chunk_map == NULL);
    1582             : 
    1583           0 :         p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
    1584             : 
    1585           0 :         if (!p2l_map->chunk_map) {
    1586           0 :                 return -ENOMEM;
    1587             :         }
    1588             : 
    1589           0 :         if (ftl_chunk_alloc_md_entry(chunk)) {
    1590           0 :                 ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
    1591           0 :                 p2l_map->chunk_map = NULL;
    1592           0 :                 return -ENOMEM;
    1593             :         }
    1594             : 
    1595             :         /* Initialize all P2L map entries to FTL_LBA_INVALID */
    1596           0 :         memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
    1597             : 
    1598           0 :         return 0;
    1599             : }
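
/*
 * The memset(-1) above assumes FTL_LBA_INVALID is the all-ones bit pattern,
 * so a single byte fill marks every packed LBA entry invalid. A compile-time
 * statement of that assumption (a sketch; the tree may already assert this
 * elsewhere):
 */
SPDK_STATIC_ASSERT(FTL_LBA_INVALID == UINT64_MAX,
		   "P2L map init relies on an all-ones invalid LBA");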
    1600             : 
    1601             : int
    1602           0 : ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
    1603             : {
    1604           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1605             :         struct ftl_nv_cache_chunk *chunk;
    1606             :         uint64_t chunks_number, offset, i;
    1607           0 :         int status = 0;
    1608             :         bool active;
    1609             : 
    1610           0 :         nv_cache->chunk_current = NULL;
    1611           0 :         TAILQ_INIT(&nv_cache->chunk_free_list);
    1612           0 :         TAILQ_INIT(&nv_cache->chunk_full_list);
    1613           0 :         TAILQ_INIT(&nv_cache->chunk_inactive_list);
    1614           0 :         nv_cache->chunk_full_count = 0;
    1615           0 :         nv_cache->chunk_free_count = 0;
    1616           0 :         nv_cache->chunk_inactive_count = 0;
    1617             : 
    1618           0 :         assert(nv_cache->chunk_open_count == 0);
    1619           0 :         offset = nvc_data_offset(nv_cache);
    1620           0 :         if (!nv_cache->chunks) {
    1621           0 :                 FTL_ERRLOG(dev, "No NV cache metadata\n");
    1622           0 :                 return -1;
    1623             :         }
    1624             : 
    1625           0 :         if (dev->sb->upgrade_ready) {
    1626             :                 /*
    1627             :                  * During upgrade some transitions are allowed:
    1628             :                  *
    1629             :                  * 1. FREE -> INACTIVE
    1630             :                  * 2. INACTIVE -> FREE
    1631             :                  */
    1632           0 :                 chunk = nv_cache->chunks;
    1633           0 :                 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1634           0 :                         active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
    1635             : 
    1636           0 :                         if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
    1637           0 :                                 if (!active) {
    1638           0 :                                         chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
    1639             :                                 }
    1640           0 :                         } else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
    1641           0 :                                 if (active) {
    1642           0 :                                         chunk->md->state = FTL_CHUNK_STATE_FREE;
    1643             :                                 }
    1644             :                         }
    1645             :                 }
    1646             :         }
    1647             : 
    1648           0 :         chunk = nv_cache->chunks;
    1649           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1650           0 :                 chunk->nv_cache = nv_cache;
    1651           0 :                 nvc_validate_md(nv_cache, chunk->md);
    1652             : 
    1653           0 :                 if (offset != chunk->offset) {
    1654           0 :                         status = -EINVAL;
    1655           0 :                         goto error;
    1656             :                 }
    1657             : 
    1658           0 :                 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
    1659           0 :                         status = -EINVAL;
    1660           0 :                         goto error;
    1661             :                 }
    1662             : 
    1663           0 :                 active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
    1664           0 :                 if (false == active) {
    1665           0 :                         if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
    1666           0 :                                 status = -EINVAL;
    1667           0 :                                 goto error;
    1668             :                         }
    1669             :                 }
    1670             : 
    1671           0 :                 switch (chunk->md->state) {
    1672           0 :                 case FTL_CHUNK_STATE_FREE:
    1673           0 :                         if (chunk->md->blocks_written || chunk->md->write_pointer) {
    1674           0 :                                 status = -EINVAL;
    1675           0 :                                 goto error;
    1676             :                         }
    1677             :                         /* Chunk is empty, move it to the free list */
    1678           0 :                         TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
    1679           0 :                         nv_cache->chunk_free_count++;
    1680           0 :                         break;
    1681           0 :                 case FTL_CHUNK_STATE_OPEN:
    1682             :                         /* All chunks need to be closed at this point */
    1683           0 :                         status = -EINVAL;
    1684           0 :                         goto error;
    1685             :                         break;
    1686           0 :                 case FTL_CHUNK_STATE_CLOSED:
    1687           0 :                         if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
    1688           0 :                                 status = -EINVAL;
    1689           0 :                                 goto error;
    1690             :                         }
    1691             :                         /* Chunk is full, move it to the full list */
    1692           0 :                         TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
    1693           0 :                         nv_cache->chunk_full_count++;
    1694           0 :                         break;
    1695           0 :                 case FTL_CHUNK_STATE_INACTIVE:
    1696           0 :                         TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
    1697           0 :                         nv_cache->chunk_inactive_count++;
    1698           0 :                         break;
    1699           0 :                 default:
    1700           0 :                         status = -EINVAL;
    1701           0 :                         FTL_ERRLOG(dev, "Invalid chunk state\n");
    1702           0 :                         goto error;
    1703             :                 }
    1704             : 
    1705           0 :                 offset += nv_cache->chunk_blocks;
    1706             :         }
    1707             : 
    1708           0 :         chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
    1709           0 :                         nv_cache->chunk_inactive_count;
    1710           0 :         assert(nv_cache->chunk_current == NULL);
    1711             : 
    1712           0 :         if (chunks_number != nv_cache->chunk_count) {
    1713           0 :                 FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
    1714           0 :                 status = -EINVAL;
    1715           0 :                 goto error;
    1716             :         }
    1717             : 
    1718           0 :         status = sort_chunks(nv_cache);
    1719           0 :         if (status) {
    1720           0 :                 FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
    1721             :         }
    1722             : 
    1723           0 :         FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
    1724             :                       nv_cache->chunk_full_count, nv_cache->chunk_free_count);
    1725             : 
    1726           0 :         if (0 == status) {
    1727           0 :                 FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
    1728             :         } else {
    1729           0 :                 FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
    1730             :         }
    1731             : 
    1732             :         /* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
    1733             :          * upgrade. Recalculate the thresholds that depend on active chunk count.
    1734             :          */
    1735           0 :         ftl_nv_cache_init_update_limits(dev);
    1736           0 : error:
    1737           0 :         return status;
    1738             : }
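
/*
 * The load-time invariants enforced above, summarized per chunk state:
 *
 *   FREE     - blocks_written == 0 and write_pointer == 0
 *   OPEN     - never valid on load; every chunk must be closed before shutdown
 *   CLOSED   - blocks_written == chunk_blocks (fully written)
 *   INACTIVE - required whenever the device reports the chunk range inactive
 */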
    1739             : 
    1740             : void
    1741           0 : ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
    1742             :                             uint64_t *close_seq_id)
    1743             : {
    1744           0 :         uint64_t i, o_seq_id = 0, c_seq_id = 0;
    1745             :         struct ftl_nv_cache_chunk *chunk;
    1746             : 
    1747           0 :         chunk = nv_cache->chunks;
    1748           0 :         assert(chunk);
    1749             : 
    1750             :         /* Iterate over chunks and get their max open and close seq id */
    1751           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1752           0 :                 o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
    1753           0 :                 c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
    1754             :         }
    1755             : 
    1756           0 :         *open_seq_id = o_seq_id;
    1757           0 :         *close_seq_id = c_seq_id;
    1758           0 : }
    1759             : 
    1760             : typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
    1761             : 
    1762             : static void
    1763           0 : write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
    1764             : {
    1765           0 :         struct ftl_basic_rq *brq = arg;
    1766           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    1767             : 
    1768           0 :         ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
    1769             : 
    1770           0 :         brq->success = success;
    1771           0 :         if (spdk_likely(success)) {
    1772           0 :                 chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
    1773             :         }
    1774             : 
    1775           0 :         spdk_bdev_free_io(bdev_io);
    1776           0 :         brq->owner.cb(brq);
    1777           0 : }
    1778             : 
    1779             : static void
    1780           0 : _ftl_chunk_basic_rq_write(void *_brq)
    1781             : {
    1782           0 :         struct ftl_basic_rq *brq = _brq;
    1783           0 :         struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
    1784           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1785             :         int rc;
    1786             : 
    1787           0 :         rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1788             :                         brq->io_payload, NULL, brq->io.addr,
    1789             :                         brq->num_blocks, write_brq_end, brq);
    1790           0 :         if (spdk_unlikely(rc)) {
    1791           0 :                 if (rc == -ENOMEM) {
    1792           0 :                         struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
    1793           0 :                         brq->io.bdev_io_wait.bdev = bdev;
    1794           0 :                         brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
    1795           0 :                         brq->io.bdev_io_wait.cb_arg = brq;
    1796           0 :                         spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
    1797             :                 } else {
    1798           0 :                         ftl_abort();
    1799             :                 }
    1800             :         }
    1801           0 : }
    1802             : 
    1803             : static void
    1804           0 : ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
    1805             : {
    1806           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1807           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1808             : 
    1809           0 :         brq->io.chunk = chunk;
    1810           0 :         brq->success = false;
    1811             : 
    1812           0 :         _ftl_chunk_basic_rq_write(brq);
    1813             : 
    1814           0 :         chunk->md->write_pointer += brq->num_blocks;
    1815           0 :         dev->stats.io_activity_total += brq->num_blocks;
    1816           0 : }
    1817             : 
    1818             : static void
    1819           0 : read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
    1820             : {
    1821           0 :         struct ftl_basic_rq *brq = arg;
    1822             : 
    1823           0 :         ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
    1824             : 
    1825           0 :         brq->success = success;
    1826             : 
    1827           0 :         brq->owner.cb(brq);
    1828           0 :         spdk_bdev_free_io(bdev_io);
    1829           0 : }
    1830             : 
    1831             : static int
    1832           0 : ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
    1833             : {
    1834           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1835           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1836             :         int rc;
    1837             : 
    1838           0 :         brq->io.chunk = chunk;
    1839           0 :         brq->success = false;
    1840             : 
    1841           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1842             :                         brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
    1843             : 
    1844           0 :         if (spdk_likely(!rc)) {
    1845           0 :                 dev->stats.io_activity_total += brq->num_blocks;
    1846             :         }
    1847             : 
    1848           0 :         return rc;
    1849             : }
    1850             : 
    1851             : static void
    1852           0 : chunk_open_cb(int status, void *ctx)
    1853             : {
    1854           0 :         struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
    1855             : 
    1856           0 :         if (spdk_unlikely(status)) {
    1857             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1858             :                 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
    1859             :                 return;
    1860             : #else
    1861           0 :                 ftl_abort();
    1862             : #endif
    1863             :         }
    1864             : 
    1865           0 :         chunk->md->state = FTL_CHUNK_STATE_OPEN;
    1866           0 : }
    1867             : 
    1868             : static void
    1869           0 : ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
    1870             : {
    1871           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1872           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1873           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    1874           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    1875             : 
    1876           0 :         if (chunk_alloc_p2l_map(chunk)) {
    1877           0 :                 assert(0);
    1878             :                 /*
    1879             :                  * We control the number of open chunks; it must be consistent with the size
    1880             :                  * of the chunk P2L map pool
    1881             :                  */
    1882             :                 ftl_abort();
    1883             :                 return;
    1884             :         }
    1885             : 
    1886           0 :         chunk->nv_cache->chunk_open_count++;
    1887             : 
    1888           0 :         assert(chunk->md->write_pointer == 0);
    1889           0 :         assert(chunk->md->blocks_written == 0);
    1890             : 
    1891           0 :         memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    1892           0 :         p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
    1893           0 :         p2l_map->chunk_dma_md->p2l_map_checksum = 0;
    1894             : 
    1895           0 :         ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
    1896             :                                NULL, chunk_open_cb, chunk,
    1897             :                                &chunk->md_persist_entry_ctx);
    1898             : }
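
/*
 * Note the persistence ordering in ftl_chunk_open() above: the chunk metadata
 * is copied into a DMA-able shadow (chunk_dma_md), the shadow is mutated and
 * persisted, and only the completion callback (chunk_open_cb) applies the new
 * state to the in-memory chunk->md. The same shadow-then-commit pattern is
 * used for closing chunks below.
 */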
    1899             : 
    1900             : static void
    1901           0 : chunk_close_cb(int status, void *ctx)
    1902             : {
    1903           0 :         struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
    1904             : 
    1905           0 :         assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
    1906             : 
    1907           0 :         if (spdk_likely(!status)) {
    1908           0 :                 chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
    1909           0 :                 chunk_free_p2l_map(chunk);
    1910             : 
    1911           0 :                 assert(chunk->nv_cache->chunk_open_count > 0);
    1912           0 :                 chunk->nv_cache->chunk_open_count--;
    1913             : 
    1914             :                 /* Chunk is full, move it to the full list */
    1915           0 :                 TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
    1916           0 :                 chunk->nv_cache->chunk_full_count++;
    1917             : 
    1918           0 :                 chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
    1919             : 
    1920           0 :                 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
    1921             :         } else {
    1922             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1923             :                 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
    1924             : #else
    1925           0 :                 ftl_abort();
    1926             : #endif
    1927             :         }
    1928           0 : }
    1929             : 
    1930             : static void
    1931           0 : chunk_map_write_cb(struct ftl_basic_rq *brq)
    1932             : {
    1933           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    1934           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1935           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1936           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    1937           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    1938             :         uint32_t chunk_map_crc;
    1939             : 
    1940           0 :         if (spdk_likely(brq->success)) {
    1941           0 :                 chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
    1942           0 :                                                    chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
    1943           0 :                 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    1944           0 :                 p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
    1945           0 :                 p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
    1946           0 :                 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
    1947             :                                        NULL, chunk_close_cb, chunk,
    1948             :                                        &chunk->md_persist_entry_ctx);
    1949             :         } else {
    1950             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1951             :                 /* retry */
    1952             :                 chunk->md->write_pointer -= brq->num_blocks;
    1953             :                 ftl_chunk_basic_rq_write(chunk, brq);
    1954             : #else
    1955           0 :                 ftl_abort();
    1956             : #endif
    1957             :         }
    1958           0 : }
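
/*
 * The tail-metadata checksum above uses SPDK's incremental CRC32C helper with
 * seed 0 over the whole P2L map. A minimal sketch of the matching verification
 * at load time under the same convention (illustrative only):
 */
static bool
sketch_p2l_checksum_ok(struct ftl_nv_cache_chunk *chunk)
{
	uint32_t crc = spdk_crc32c_update(chunk->p2l_map.chunk_map,
					  chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);

	return crc == chunk->md->p2l_map_checksum;
}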
    1959             : 
    1960             : static void
    1961           0 : ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
    1962             : {
    1963           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1964           0 :         struct ftl_basic_rq *brq = &chunk->metadata_rq;
    1965           0 :         void *metadata = chunk->p2l_map.chunk_map;
    1966             : 
    1967           0 :         chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
    1968           0 :         ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    1969           0 :         ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
    1970             : 
    1971           0 :         assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
    1972           0 :         brq->io.addr = chunk->offset + chunk->md->write_pointer;
    1973             : 
    1974           0 :         ftl_chunk_basic_rq_write(chunk, brq);
    1975           0 : }
    1976             : 
    1977             : static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
    1978             :                                   void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
    1979             : static void read_tail_md_cb(struct ftl_basic_rq *brq);
    1980             : static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
    1981             : 
    1982             : static void
    1983           0 : restore_chunk_close_cb(int status, void *ctx)
    1984             : {
    1985           0 :         struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
    1986           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    1987           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1988             : 
    1989           0 :         if (spdk_unlikely(status)) {
    1990           0 :                 parent->success = false;
    1991             :         } else {
    1992           0 :                 chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
    1993           0 :                 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
    1994             :         }
    1995             : 
    1996           0 :         read_tail_md_cb(parent);
    1997           0 : }
    1998             : 
    1999             : static void
    2000           0 : restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
    2001             : {
    2002           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    2003           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    2004           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    2005           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    2006           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    2007             :         uint32_t chunk_map_crc;
    2008             : 
    2009             :         /* Restore the original owner callback */
    2010           0 :         ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
    2011             : 
    2012           0 :         if (spdk_unlikely(!parent->success)) {
    2013           0 :                 read_tail_md_cb(parent);
    2014           0 :                 return;
    2015             :         }
    2016             : 
    2017           0 :         chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
    2018           0 :                                            chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
    2019           0 :         memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    2020           0 :         p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
    2021           0 :         p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
    2022           0 :         p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
    2023           0 :         p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
    2024             : 
    2025           0 :         ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
    2026             :                                restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
    2027             : }
    2028             : 
    2029             : static void
    2030           0 : restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
    2031             : {
    2032           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    2033             :         void *metadata;
    2034             : 
    2035           0 :         chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
    2036             : 
    2037           0 :         metadata = chunk->p2l_map.chunk_map;
    2038           0 :         ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    2039           0 :         ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
    2040             : 
    2041           0 :         parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
    2042           0 :         parent->io.chunk = chunk;
    2043             : 
    2044           0 :         ftl_chunk_basic_rq_write(chunk, parent);
    2045           0 : }
    2046             : 
    2047             : static void
    2048           0 : read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    2049             : {
    2050           0 :         struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
    2051           0 :         struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
    2052           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    2053           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    2054           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    2055             :         union ftl_md_vss *md;
    2056           0 :         uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
    2057           0 :         uint64_t len = bdev_io->u.bdev.num_blocks;
    2058           0 :         ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
    2059             :         int rc;
    2060             : 
    2061           0 :         ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
    2062             : 
    2063           0 :         spdk_bdev_free_io(bdev_io);
    2064             : 
    2065           0 :         if (!success) {
    2066           0 :                 parent->success = false;
    2067           0 :                 read_tail_md_cb(parent);
    2068           0 :                 return;
    2069             :         }
    2070             : 
    2071           0 :         while (rq->iter.idx < rq->iter.count) {
    2072             :                 /* Get metadata */
    2073           0 :                 md = rq->entries[rq->iter.idx].io_md;
    2074           0 :                 if (md->nv_cache.seq_id != chunk->md->seq_id) {
    2075           0 :                         md->nv_cache.lba = FTL_LBA_INVALID;
    2076             :                 }
    2077             :                 /*
    2078             :                  * The p2l map may contain arbitrary data at this point (it could hold blocks of a
    2079             :                  * tail md that was never fully written), so even FTL_LBA_INVALID must be set explicitly.
    2080             :                  */
    2081             : 
    2082           0 :                 ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
    2083           0 :                 rq->iter.idx++;
    2084             :         }
    2085             : 
    2086           0 :         if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
    2087           0 :                 cache_offset += len;
    2088           0 :                 len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
    2089           0 :                 rq->iter.idx = 0;
    2090           0 :                 rq->iter.count = len;
    2091             : 
    2092           0 :                 rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
    2093             :                                 nv_cache->cache_ioch,
    2094             :                                 rq->io_payload,
    2095             :                                 rq->io_md,
    2096             :                                 cache_offset, len,
    2097             :                                 read_open_chunk_cb,
    2098             :                                 rq);
    2099             : 
    2100           0 :                 if (rc) {
    2101           0 :                         ftl_rq_del(rq);
    2102           0 :                         parent->success = false;
    2103           0 :                         read_tail_md_cb(parent);
    2104           0 :                         return;
    2105             :                 }
    2106             :         } else {
    2107           0 :                 ftl_rq_del(rq);
    2108           0 :                 restore_fill_tail_md(parent, chunk);
    2109             :         }
    2110             : }
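
/*
 * A sketch of the batch-length arithmetic used in read_open_chunk_cb() above: user data
 * occupies [chunk->offset, chunk->offset + chunk_tail_md_offset(nv_cache)) and is read in
 * dev->xfer_size batches, the last one clamped to what remains. Illustrative helper only.
 */
static uint64_t
example_next_batch_len(uint64_t xfer_size, uint64_t user_data_end, uint64_t cache_offset)
{
        /* Full xfer_size batches until fewer than xfer_size blocks remain */
        return spdk_min(xfer_size, user_data_end - cache_offset);
}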
    2111             : 
    2112             : static void
    2113           0 : restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
    2114             : {
    2115           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    2116           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    2117             :         struct ftl_rq *rq;
    2118             :         uint64_t addr;
    2119           0 :         uint64_t len = dev->xfer_size;
    2120             :         int rc;
    2121             : 
    2122             :         /*
    2123             :          * Prefill the p2l map with FTL_LBA_INVALID before rebuilding it from user data.
    2124             :          * TODO: this is needed because the tail md blocks (the p2l map itself) are also represented in the p2l map, instead of just the user data region
    2125             :          */
    2126           0 :         memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
    2127             : 
    2128             :         /* Read the user data, recalculate the chunk's p2l map and write the tail md out with it */
    2129           0 :         rq = ftl_rq_new(dev, dev->nv_cache.md_size);
    2130           0 :         if (!rq) {
    2131           0 :                 parent->success = false;
    2132           0 :                 read_tail_md_cb(parent);
    2133           0 :                 return;
    2134             :         }
    2135             : 
    2136           0 :         rq->owner.priv = parent;
    2137           0 :         rq->iter.idx = 0;
    2138           0 :         rq->iter.count = len;
    2139             : 
    2140           0 :         addr = chunk->offset;
    2141             : 
    2142           0 :         len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
    2143             : 
    2144           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
    2145             :                         nv_cache->cache_ioch,
    2146             :                         rq->io_payload,
    2147             :                         rq->io_md,
    2148             :                         addr, len,
    2149             :                         read_open_chunk_cb,
    2150             :                         rq);
    2151             : 
    2152           0 :         if (rc) {
    2153           0 :                 ftl_rq_del(rq);
    2154           0 :                 parent->success = false;
    2155           0 :                 read_tail_md_cb(parent);
    2156             :         }
    2157             : }
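
/*
 * Overview of the open chunk recovery pipeline rooted at restore_open_chunk():
 *   restore_open_chunk()      - prefill the P2L map, issue the first user data read
 *   read_open_chunk_cb()      - rebuild P2L entries from per-block metadata, read the next batch
 *   restore_fill_tail_md()    - write the rebuilt tail MD back to the chunk
 *   restore_fill_p2l_map_cb() - checksum the map and persist the closed chunk MD
 *   restore_chunk_close_cb()  - commit the checksum and CLOSED state to chunk->md
 *   read_tail_md_cb()         - invoke the owner callback (recover_open_chunk_cb())
 * Any failure along the way sets parent->success = false and short-circuits to read_tail_md_cb().
 */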
    2158             : 
    2159             : static void
    2160           0 : read_tail_md_cb(struct ftl_basic_rq *brq)
    2161             : {
    2162           0 :         brq->owner.cb(brq);
    2163           0 : }
    2164             : 
    2165             : static int
    2166           0 : ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
    2167             :                        void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
    2168             : {
    2169           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    2170             :         void *metadata;
    2171             :         int rc;
    2172             : 
    2173           0 :         metadata = chunk->p2l_map.chunk_map;
    2174           0 :         ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    2175           0 :         ftl_basic_rq_set_owner(brq, cb, cb_ctx);
    2176             : 
    2177           0 :         brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
    2178           0 :         rc = ftl_chunk_basic_rq_read(chunk, brq);
    2179             : 
    2180           0 :         return rc;
    2181             : }
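
/*
 * Note that the tail MD read above targets the same address that ftl_chunk_close() writes:
 * chunk->offset + chunk_tail_md_offset(chunk->nv_cache), i.e. the tail MD region at the
 * end of the chunk, after the user data.
 */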
    2182             : 
    2183             : struct restore_chunk_md_ctx {
    2184             :         ftl_chunk_md_cb cb;
    2185             :         void *cb_ctx;
    2186             :         int status;
    2187             :         uint64_t qd;
    2188             :         uint64_t id;
    2189             : };
    2190             : 
    2191             : static inline bool
    2192           0 : is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
    2193             : {
    2194           0 :         uint64_t chunk_count = 0;
    2195             : 
    2196           0 :         chunk_count += nv_cache->chunk_open_count;
    2197           0 :         chunk_count += nv_cache->chunk_free_count;
    2198           0 :         chunk_count += nv_cache->chunk_full_count;
    2199           0 :         chunk_count += nv_cache->chunk_comp_count;
    2200           0 :         chunk_count += nv_cache->chunk_inactive_count;
    2201             : 
    2202           0 :         return chunk_count == nv_cache->chunk_count;
    2203             : }
    2204             : 
    2205             : static void
    2206           0 : walk_tail_md_cb(struct ftl_basic_rq *brq)
    2207             : {
    2208           0 :         struct ftl_mngt_process *mngt = brq->owner.priv;
    2209           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    2210           0 :         struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
    2211           0 :         int rc = 0;
    2212             : 
    2213           0 :         if (brq->success) {
    2214           0 :                 rc = ctx->cb(chunk, ctx->cb_ctx);
    2215             :         } else {
    2216           0 :                 rc = -EIO;
    2217             :         }
    2218             : 
    2219           0 :         if (rc) {
    2220           0 :                 ctx->status = rc;
    2221             :         }
    2222           0 :         ctx->qd--;
    2223           0 :         chunk_free_p2l_map(chunk);
    2224           0 :         ftl_mngt_continue_step(mngt);
    2225           0 : }
    2226             : 
    2227             : static void
    2228           0 : ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
    2229             :                                uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
    2230             : {
    2231           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2232             :         struct restore_chunk_md_ctx *ctx;
    2233             : 
    2234           0 :         ctx = ftl_mngt_get_step_ctx(mngt);
    2235           0 :         if (!ctx) {
    2236           0 :                 if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
    2237           0 :                         ftl_mngt_fail_step(mngt);
    2238           0 :                         return;
    2239             :                 }
    2240           0 :                 ctx = ftl_mngt_get_step_ctx(mngt);
    2241           0 :                 assert(ctx);
    2242             : 
    2243           0 :                 ctx->cb = cb;
    2244           0 :                 ctx->cb_ctx = cb_ctx;
    2245             :         }
    2246             : 
    2247             :         /*
    2248             :          * This function generates a high queue depth and relies on ftl_mngt_continue_step during completions to make sure all chunks
    2249             :          * are processed before reporting an error (if any was found) or continuing on.
    2250             :          */
    2251           0 :         if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
    2252           0 :                 if (!is_chunk_count_valid(nvc)) {
    2253           0 :                         FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
    2254           0 :                         assert(false);
    2255             :                         ctx->status = -EINVAL;
    2256             :                 }
    2257             : 
    2258           0 :                 if (ctx->status) {
    2259           0 :                         ftl_mngt_fail_step(mngt);
    2260             :                 } else {
    2261           0 :                         ftl_mngt_next_step(mngt);
    2262             :                 }
    2263           0 :                 return;
    2264             :         }
    2265             : 
    2266           0 :         while (ctx->id < nvc->chunk_count) {
    2267           0 :                 struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
    2268             :                 int rc;
    2269             : 
    2270           0 :                 if (!chunk->recovery) {
    2271             :                         /* This chunk is inactive or empty and not used in recovery */
    2272           0 :                         ctx->id++;
    2273           0 :                         continue;
    2274             :                 }
    2275             : 
    2276           0 :                 if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
    2277           0 :                         ctx->id++;
    2278           0 :                         continue;
    2279             :                 }
    2280             : 
    2281           0 :                 if (chunk_alloc_p2l_map(chunk)) {
    2282             :                         /* No free P2L maps left; break and continue later */
    2283           0 :                         break;
    2284             :                 }
    2285           0 :                 ctx->id++;
    2286             : 
    2287           0 :                 rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
    2288             : 
    2289           0 :                 if (0 == rc) {
    2290           0 :                         ctx->qd++;
    2291             :                 } else {
    2292           0 :                         chunk_free_p2l_map(chunk);
    2293           0 :                         ctx->status = rc;
    2294             :                 }
    2295             :         }
    2296             : 
    2297           0 :         if (0 == ctx->qd) {
    2298             :                 /*
    2299             :                  * A queue depth of zero can happen when all leftover chunks are in the free state.
    2300             :                  * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
    2301             :                  * To streamline all the potential error handling (since many chunks read their P2L maps at the same time),
    2302             :                  * we use ftl_mngt_continue_step to arrive at the same step-end check (see the beginning of this function).
    2303             :                  */
    2304           0 :                 ftl_mngt_continue_step(mngt);
    2305             :         }
    2306             : 
    2307             : }
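
/*
 * A condensed sketch of the mngt "continue step" pattern used above, reusing
 * struct restore_chunk_md_ctx; the function below is illustrative, not part of this file.
 */
#if 0
static void
example_walk_step(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
        struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);

        if (ctx->qd == 0 && ctx->id == dev->nv_cache.chunk_count) {
                /* All reads completed and all chunks visited - finish the step */
                if (ctx->status) {
                        ftl_mngt_fail_step(mngt);
                } else {
                        ftl_mngt_next_step(mngt);
                }
                return;
        }

        /* Issue more tail MD reads, incrementing ctx->qd for each one; every completion
         * decrements ctx->qd and calls ftl_mngt_continue_step(mngt), which re-enters this
         * function until the termination condition above holds.
         */
}
#endif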
    2308             : 
    2309             : void
    2310           0 : ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
    2311             :                               ftl_chunk_md_cb cb, void *cb_ctx)
    2312             : {
    2313           0 :         ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
    2314           0 : }
    2315             : 
    2316             : static void
    2317           0 : restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
    2318             : {
    2319           0 :         struct ftl_mngt_process *mngt = md->owner.cb_ctx;
    2320           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2321             :         struct ftl_nv_cache_chunk *chunk;
    2322             :         uint64_t i;
    2323             : 
    2324           0 :         if (status) {
    2325             :                 /* Restore error, end step */
    2326           0 :                 ftl_mngt_fail_step(mngt);
    2327           0 :                 return;
    2328             :         }
    2329             : 
    2330           0 :         for (i = 0; i < nvc->chunk_count; i++) {
    2331           0 :                 chunk = &nvc->chunks[i];
    2332             : 
    2333           0 :                 if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
    2334           0 :                     chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
    2335           0 :                         status = -EINVAL;
    2336           0 :                         break;
    2337             :                 }
    2338             : 
    2339           0 :                 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
    2340           0 :                         status = -EINVAL;
    2341           0 :                         break;
    2342             :                 }
    2343             : 
    2344           0 :                 switch (chunk->md->state) {
    2345           0 :                 case FTL_CHUNK_STATE_FREE:
    2346           0 :                         break;
    2347           0 :                 case FTL_CHUNK_STATE_OPEN:
    2348           0 :                         TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
    2349           0 :                         nvc->chunk_free_count--;
    2350             : 
    2351           0 :                         TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
    2352           0 :                         nvc->chunk_open_count++;
    2353             : 
    2354             :                         /* Chunk is not empty; mark it for recovery */
    2355           0 :                         chunk->recovery = true;
    2356           0 :                         break;
    2357           0 :                 case FTL_CHUNK_STATE_CLOSED:
    2358           0 :                         TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
    2359           0 :                         nvc->chunk_free_count--;
    2360             : 
    2361           0 :                         TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
    2362           0 :                         nvc->chunk_full_count++;
    2363             : 
    2364             :                         /* Chunk is not empty; mark it for recovery */
    2365           0 :                         chunk->recovery = true;
    2366           0 :                         break;
    2367           0 :                 case FTL_CHUNK_STATE_INACTIVE:
    2368           0 :                         break;
    2369           0 :                 default:
    2370           0 :                         status = -EINVAL;
    2371             :                 }
    2372             :         }
    2373             : 
    2374           0 :         if (status) {
    2375           0 :                 ftl_mngt_fail_step(mngt);
    2376             :         } else {
    2377           0 :                 ftl_mngt_next_step(mngt);
    2378             :         }
    2379             : }
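
/*
 * Summary of the list moves performed by restore_chunk_state_cb() above (all chunks
 * start out on the free list):
 *   FTL_CHUNK_STATE_FREE     - no move
 *   FTL_CHUNK_STATE_OPEN     - free list -> open list, recovery = true
 *   FTL_CHUNK_STATE_CLOSED   - free list -> full list, recovery = true
 *   FTL_CHUNK_STATE_INACTIVE - no move
 *   any other state          - the restore fails with -EINVAL
 */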
    2380             : 
    2381             : void
    2382           0 : ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
    2383             : {
    2384           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    2385             : 
    2386           0 :         md->owner.cb_ctx = mngt;
    2387           0 :         md->cb = restore_chunk_state_cb;
    2388           0 :         ftl_md_restore(md);
    2389           0 : }
    2390             : 
    2391             : static void
    2392           0 : recover_open_chunk_cb(struct ftl_basic_rq *brq)
    2393             : {
    2394           0 :         struct ftl_mngt_process *mngt = brq->owner.priv;
    2395           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    2396           0 :         struct ftl_nv_cache *nvc = chunk->nv_cache;
    2397           0 :         struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
    2398             : 
    2399           0 :         chunk_free_p2l_map(chunk);
    2400             : 
    2401           0 :         if (!brq->success) {
    2402           0 :                 FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
    2403             :                            chunk->md->seq_id);
    2404           0 :                 ftl_mngt_fail_step(mngt);
    2405           0 :                 return;
    2406             :         }
    2407             : 
    2408           0 :         FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
    2409             :                       chunk->md->seq_id);
    2410             : 
    2411           0 :         TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
    2412           0 :         nvc->chunk_open_count--;
    2413             : 
    2414           0 :         TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
    2415           0 :         nvc->chunk_full_count++;
    2416             : 
    2417             :         /* This is now a closed chunk */
    2418           0 :         chunk->md->write_pointer = nvc->chunk_blocks;
    2419           0 :         chunk->md->blocks_written = nvc->chunk_blocks;
    2420             : 
    2421           0 :         ftl_mngt_continue_step(mngt);
    2422             : }
    2423             : 
    2424             : void
    2425           0 : ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
    2426             : {
    2427           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2428             :         struct ftl_nv_cache_chunk *chunk;
    2429           0 :         struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
    2430             : 
    2431           0 :         if (!brq) {
    2432           0 :                 if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
    2433           0 :                         FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
    2434           0 :                         ftl_mngt_next_step(mngt);
    2435           0 :                         return;
    2436             :                 }
    2437             : 
    2438           0 :                 if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
    2439           0 :                         ftl_mngt_fail_step(mngt);
    2440           0 :                         return;
    2441             :                 }
    2442           0 :                 brq = ftl_mngt_get_step_ctx(mngt);
    2443           0 :                 ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
    2444             :         }
    2445             : 
    2446           0 :         if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
    2447           0 :                 if (!is_chunk_count_valid(nvc)) {
    2448           0 :                         FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
    2449           0 :                         ftl_mngt_fail_step(mngt);
    2450           0 :                         return;
    2451             :                 }
    2452             : 
    2453             :                 /*
    2454             :                  * Now that all chunks are loaded and closed, do the final step of
    2455             :                  * restoring the chunk states
    2456             :                  */
    2457           0 :                 if (ftl_nv_cache_load_state(nvc)) {
    2458           0 :                         ftl_mngt_fail_step(mngt);
    2459             :                 } else {
    2460           0 :                         ftl_mngt_next_step(mngt);
    2461             :                 }
    2462             :         } else {
    2463           0 :                 chunk = TAILQ_FIRST(&nvc->chunk_open_list);
    2464           0 :                 if (chunk_alloc_p2l_map(chunk)) {
    2465           0 :                         ftl_mngt_fail_step(mngt);
    2466           0 :                         return;
    2467             :                 }
    2468             : 
    2469           0 :                 brq->io.chunk = chunk;
    2470             : 
    2471           0 :                 FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
    2472             :                               chunk->offset, chunk->md->seq_id);
    2473           0 :                 restore_open_chunk(chunk, brq);
    2474             :         }
    2475             : }
    2476             : 
    2477             : int
    2478           0 : ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
    2479             : {
    2480             :         /* chunk_current migrates to the closed state when closing; any other chunks should already have been
    2481             :          * moved to the free chunk list. We also need to wait for outstanding free-chunk md persist requests */
    2482           0 :         return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
    2483             : }
    2484             : 
    2485             : void
    2486           0 : ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
    2487             : {
    2488             :         struct ftl_nv_cache_chunk *chunk;
    2489             :         uint64_t free_space;
    2490             : 
    2491           0 :         nv_cache->halt = true;
    2492             : 
    2493             :         /* Set chunks on the open list back to the free state, since no user data has been written to them */
    2494           0 :         while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
    2495           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
    2496             : 
    2497             :                 /* Chunks are moved between lists on metadata update submission, but their state only changes
    2498             :                  * on completion. Break early in that case to make sure all the necessary resources
    2499             :                  * are freed (during the next pass(es) of ftl_nv_cache_halt).
    2500             :                  */
    2501           0 :                 if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
    2502           0 :                         break;
    2503             :                 }
    2504             : 
    2505           0 :                 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
    2506           0 :                 chunk_free_p2l_map(chunk);
    2507           0 :                 ftl_nv_cache_chunk_md_initialize(chunk->md);
    2508           0 :                 assert(nv_cache->chunk_open_count > 0);
    2509           0 :                 nv_cache->chunk_open_count--;
    2510             :         }
    2511             : 
    2512             :         /* Close the current chunk by skipping all unwritten blocks */
    2513           0 :         chunk = nv_cache->chunk_current;
    2514           0 :         if (chunk != NULL) {
    2515           0 :                 nv_cache->chunk_current = NULL;
    2516           0 :                 if (chunk_is_closed(chunk)) {
    2517           0 :                         return;
    2518             :                 }
    2519             : 
    2520           0 :                 free_space = chunk_get_free_space(nv_cache, chunk);
    2521           0 :                 chunk->md->blocks_skipped = free_space;
    2522           0 :                 chunk->md->blocks_written += free_space;
    2523           0 :                 chunk->md->write_pointer += free_space;
    2524           0 :                 ftl_chunk_close(chunk);
    2525             :         }
    2526             : }
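
/*
 * Worked example for the skip arithmetic above, assuming chunk_get_free_space() returns
 * chunk_tail_md_offset(nv_cache) - write_pointer (illustrative numbers): with
 * chunk_blocks = 1024, tail_md_chunk_blocks = 8 and write_pointer = 500, free_space is
 * 1016 - 500 = 516, so blocks_skipped becomes 516 while blocks_written and write_pointer
 * advance to 1016, and ftl_chunk_close() writes the tail MD into the last 8 blocks.
 */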
    2527             : 
    2528             : uint64_t
    2529           0 : ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
    2530             : {
    2531           0 :         struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
    2532             :         uint64_t seq_id, free_space;
    2533             : 
    2534           0 :         if (!chunk) {
    2535           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
    2536           0 :                 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
    2537           0 :                         return chunk->md->seq_id;
    2538             :                 } else {
    2539           0 :                         return 0;
    2540             :                 }
    2541             :         }
    2542             : 
    2543           0 :         if (chunk_is_closed(chunk)) {
    2544           0 :                 return 0;
    2545             :         }
    2546             : 
    2547           0 :         seq_id = nv_cache->chunk_current->md->seq_id;
    2548           0 :         free_space = chunk_get_free_space(nv_cache, chunk);
    2549             : 
    2550           0 :         chunk->md->blocks_skipped = free_space;
    2551           0 :         chunk->md->blocks_written += free_space;
    2552           0 :         chunk->md->write_pointer += free_space;
    2553           0 :         if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
    2554           0 :                 ftl_chunk_close(chunk);
    2555             :         }
    2556           0 :         nv_cache->chunk_current = NULL;
    2557             : 
    2558           0 :         seq_id++;
    2559           0 :         return seq_id;
    2560             : }
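
/*
 * Example: if the chunk being retired here has seq_id 41, its free space is skipped, the
 * chunk is closed once fully written, and the trim operation is assigned seq id 42
 * (the incremented value returned above).
 */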
    2561             : 
    2562             : static double
    2563           0 : ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
    2564             :                                    struct ftl_nv_cache_chunk *chunk)
    2565             : {
    2566           0 :         double capacity = nv_cache->chunk_blocks;
    2567           0 :         double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
    2568             : 
    2569           0 :         return used / capacity;
    2570             : }
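
/*
 * Example: with chunk_blocks = 1024, blocks_written = 900 and blocks_skipped = 100,
 * utilization = (900 + 100) / 1024 ~= 0.977.
 */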
    2571             : 
    2572             : static const char *
    2573           0 : ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
    2574             : {
    2575             :         static const char *names[] = {
    2576             :                 "FREE", "OPEN", "CLOSED", "INACTIVE"
    2577             :         };
    2578             : 
    2579           0 :         assert(chunk->md->state < SPDK_COUNTOF(names));
    2580           0 :         if (chunk->md->state < SPDK_COUNTOF(names)) {
    2581           0 :                 return names[chunk->md->state];
    2582             :         } else {
    2583           0 :                 assert(false);
    2584             :                 return "?";
    2585             :         }
    2586             : }
    2587             : 
    2588             : static void
    2589           0 : ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
    2590             :                             struct spdk_json_write_ctx *w)
    2591             : {
    2592             :         uint64_t i;
    2593             :         struct ftl_nv_cache_chunk *chunk;
    2594             : 
    2595           0 :         spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
    2596           0 :         spdk_json_write_named_array_begin(w, "chunks");
    2597           0 :         for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
    2598           0 :                 spdk_json_write_object_begin(w);
    2599           0 :                 spdk_json_write_named_uint64(w, "id", i);
    2600           0 :                 spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
    2601           0 :                 spdk_json_write_named_double(w, "utilization",
    2602             :                                              ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
    2603           0 :                 spdk_json_write_object_end(w);
    2604             :         }
    2605           0 :         spdk_json_write_array_end(w);
    2606           0 : }
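
/*
 * Illustrative shape of the JSON fragment produced above (values are made up):
 *   "type": "...", "chunks": [
 *     { "id": 0, "state": "CLOSED", "utilization": 1.0 },
 *     { "id": 1, "state": "OPEN", "utilization": 0.25 }, ...
 *   ]
 */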
    2607             : 
    2608             : void
    2609           0 : ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
    2610             : {
    2611           0 :         memset(md, 0, sizeof(*md));
    2612           0 :         md->version = FTL_NVC_VERSION_CURRENT;
    2613           0 : }

Generated by: LCOV version 1.15