LCOV - code coverage report
Current view: top level - lib/ftl - ftl_nv_cache.c (source / functions)
Test: ut_cov_unit.info
Date: 2024-07-12 19:28:37
                 Hit    Total  Coverage
Lines:             0     1207     0.0 %
Functions:         0      104     0.0 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2022 Intel Corporation.
       3             :  *   All rights reserved.
       4             :  */
       5             : 
       6             : 
       7             : #include "spdk/bdev.h"
       8             : #include "spdk/bdev_module.h"
       9             : #include "spdk/ftl.h"
      10             : #include "spdk/string.h"
      11             : 
      12             : #include "ftl_nv_cache.h"
      13             : #include "ftl_nv_cache_io.h"
      14             : #include "ftl_core.h"
      15             : #include "ftl_band.h"
      16             : #include "utils/ftl_addr_utils.h"
      17             : #include "mngt/ftl_mngt.h"
      18             : 
      19             : static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
      20             : static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
      21             : static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
      22             : static void compaction_process_ftl_done(struct ftl_rq *rq);
      23             : static void compaction_process_read_entry(void *arg);
      24             : static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
      25             :                                         const struct ftl_property *property,
      26             :                                         struct spdk_json_write_ctx *w);
      27             : 
      28             : static inline const struct ftl_layout_region *
      29           0 : nvc_data_region(struct ftl_nv_cache *nv_cache)
      30             : {
      31             :         struct spdk_ftl_dev *dev;
      32             : 
      33           0 :         dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
      34           0 :         return ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_DATA_NVC);
      35             : }
      36             : 
      37             : static inline void
      38           0 : nvc_validate_md(struct ftl_nv_cache *nv_cache,
      39             :                 struct ftl_nv_cache_chunk_md *chunk_md)
      40             : {
      41           0 :         struct ftl_md *md = nv_cache->md;
      42           0 :         void *buffer = ftl_md_get_buffer(md);
      43           0 :         uint64_t size = ftl_md_get_buffer_size(md);
      44           0 :         void *ptr = chunk_md;
      45             : 
      46           0 :         if (ptr < buffer) {
      47           0 :                 ftl_abort();
      48             :         }
      49             : 
      50           0 :         ptr += sizeof(*chunk_md);
      51           0 :         if (ptr > buffer + size) {
      52           0 :                 ftl_abort();
      53             :         }
      54           0 : }
      55             : 
      56             : static inline uint64_t
      57           0 : nvc_data_offset(struct ftl_nv_cache *nv_cache)
      58             : {
      59           0 :         return nvc_data_region(nv_cache)->current.offset;
      60             : }
      61             : 
      62             : static inline uint64_t
      63           0 : nvc_data_blocks(struct ftl_nv_cache *nv_cache)
      64             : {
      65           0 :         return nvc_data_region(nv_cache)->current.blocks;
      66             : }
      67             : 
      68             : size_t
      69           0 : ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
      70             : {
       71           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache; struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
      72             :                                     struct spdk_ftl_dev, nv_cache);
      73           0 :         return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
      74             :                                     FTL_BLOCK_SIZE);
      75             : }
      76             : 
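/*
 * Illustrative arithmetic, with assumed numbers (not taken from any real
 * configuration): for chunk_data_blocks = 65536, an addr_size of 8 bytes and
 * FTL_BLOCK_SIZE = 4096, the tail P2L metadata needs
 * ceil(65536 * 8 / 4096) = 128 blocks per chunk.
 */
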
      77             : static size_t
      78           0 : nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
      79             : {
      80             :         /* Map pool element holds the whole tail md */
      81           0 :         return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
      82             : }
      83             : 
      84             : static uint64_t
      85           0 : get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
      86             : {
      87           0 :         struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
      88             : 
      89           0 :         return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
      90             : }
      91             : 
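/*
 * The inverse mapping is plain array indexing, since chunks are laid out
 * contiguously. A minimal sketch (hypothetical helper, not part of this
 * file):
 */
static inline struct ftl_nv_cache_chunk *
get_chunk_by_idx(struct ftl_nv_cache *nv_cache, uint64_t idx)
{
        assert(idx < nv_cache->chunk_count);
        return &nv_cache->chunks[idx];
}
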
      92             : int
      93           0 : ftl_nv_cache_init(struct spdk_ftl_dev *dev)
      94             : {
      95           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
      96             :         struct ftl_nv_cache_chunk *chunk;
      97             :         struct ftl_nv_cache_chunk_md *md;
      98             :         struct ftl_nv_cache_compactor *compactor;
      99             :         uint64_t i, offset;
     100             : 
     101           0 :         nv_cache->halt = true;
     102             : 
     103           0 :         nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
     104           0 :         if (!nv_cache->md) {
     105           0 :                 FTL_ERRLOG(dev, "No NV cache metadata object\n");
     106           0 :                 return -1;
     107             :         }
     108             : 
     109           0 :         nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
     110           0 :                                                nv_cache->md_size * dev->xfer_size,
     111             :                                                FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
     112           0 :         if (!nv_cache->md_pool) {
     113           0 :                 FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
     114           0 :                 return -1;
     115             :         }
     116             : 
     117             :         /*
     118             :          * Initialize chunk info
     119             :          */
     120           0 :         nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
     121           0 :         nv_cache->chunk_count = dev->layout.nvc.chunk_count;
     122           0 :         nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
     123             : 
     124             :         /* Allocate chunks */
     125           0 :         nv_cache->chunks = calloc(nv_cache->chunk_count,
     126             :                                   sizeof(nv_cache->chunks[0]));
     127           0 :         if (!nv_cache->chunks) {
     128           0 :                 FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
     129           0 :                 return -1;
     130             :         }
     131             : 
     132           0 :         TAILQ_INIT(&nv_cache->chunk_free_list);
     133           0 :         TAILQ_INIT(&nv_cache->chunk_open_list);
     134           0 :         TAILQ_INIT(&nv_cache->chunk_full_list);
     135           0 :         TAILQ_INIT(&nv_cache->chunk_comp_list);
     136           0 :         TAILQ_INIT(&nv_cache->needs_free_persist_list);
     137             : 
     138             :         /* First chunk metadata */
     139           0 :         md = ftl_md_get_buffer(nv_cache->md);
     140           0 :         if (!md) {
     141           0 :                 FTL_ERRLOG(dev, "No NV cache metadata\n");
     142           0 :                 return -1;
     143             :         }
     144             : 
     145           0 :         nv_cache->chunk_free_count = nv_cache->chunk_count;
     146             : 
     147           0 :         chunk = nv_cache->chunks;
     148           0 :         offset = nvc_data_offset(nv_cache);
     149           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
     150           0 :                 chunk->nv_cache = nv_cache;
     151           0 :                 chunk->md = md;
     152           0 :                 nvc_validate_md(nv_cache, md);
     153           0 :                 chunk->offset = offset;
     154           0 :                 offset += nv_cache->chunk_blocks;
     155           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
     156             :         }
     157           0 :         assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
     158             : 
      159             :         /* Start compaction when the number of full chunks exceeds the given % of all chunks */
     160           0 :         nv_cache->chunk_compaction_threshold = nv_cache->chunk_count *
     161           0 :                                                dev->conf.nv_cache.chunk_compaction_threshold / 100;
     162           0 :         TAILQ_INIT(&nv_cache->compactor_list);
     163           0 :         for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
     164           0 :                 compactor = compactor_alloc(dev);
     165             : 
     166           0 :                 if (!compactor) {
     167           0 :                         FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
     168           0 :                         return -1;
     169             :                 }
     170             : 
     171           0 :                 TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
     172             :         }
     173             : 
     174             : #define FTL_MAX_OPEN_CHUNKS 2
     175           0 :         nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
     176             :                                                 nv_cache_p2l_map_pool_elem_size(nv_cache),
     177             :                                                 FTL_BLOCK_SIZE,
     178             :                                                 SPDK_ENV_SOCKET_ID_ANY);
     179           0 :         if (!nv_cache->p2l_pool) {
     180           0 :                 return -ENOMEM;
     181             :         }
     182             : 
     183             :         /* One entry per open chunk */
     184           0 :         nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
     185             :                                   sizeof(struct ftl_nv_cache_chunk_md),
     186             :                                   FTL_BLOCK_SIZE,
     187             :                                   SPDK_ENV_SOCKET_ID_ANY);
     188           0 :         if (!nv_cache->chunk_md_pool) {
     189           0 :                 return -ENOMEM;
     190             :         }
     191             : 
      192             :         /* Each compactor can be reading a different chunk whose state it must switch to free at the end,
      193             :          * plus one backup entry each for processing high-invalidity chunks (if there is a backlog of chunks
      194             :          * with extremely low, even zero, validity, the compactors can process them quickly and trigger many
      195             :          * transitions to the free state at once). */
     196           0 :         nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
     197             :                                        sizeof(struct ftl_nv_cache_chunk_md),
     198             :                                        FTL_BLOCK_SIZE,
     199             :                                        SPDK_ENV_SOCKET_ID_ANY);
     200           0 :         if (!nv_cache->free_chunk_md_pool) {
     201           0 :                 return -ENOMEM;
     202             :         }
     203             : 
     204           0 :         nv_cache->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
     205           0 :                                           (spdk_get_ticks_hz() / 1000);
     206           0 :         nv_cache->chunk_free_target = spdk_divide_round_up(nv_cache->chunk_count *
     207           0 :                                       dev->conf.nv_cache.chunk_free_target,
     208             :                                       100);
     209             : 
     210           0 :         ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
     211             :                               NULL, true);
     212           0 :         return 0;
     213             : }
     214             : 
     215             : void
     216           0 : ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
     217             : {
     218           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
     219             :         struct ftl_nv_cache_compactor *compactor;
     220             : 
     221           0 :         while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
     222           0 :                 compactor = TAILQ_FIRST(&nv_cache->compactor_list);
     223           0 :                 TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
     224             : 
     225           0 :                 compactor_free(dev, compactor);
     226             :         }
     227             : 
     228           0 :         ftl_mempool_destroy(nv_cache->md_pool);
     229           0 :         ftl_mempool_destroy(nv_cache->p2l_pool);
     230           0 :         ftl_mempool_destroy(nv_cache->chunk_md_pool);
     231           0 :         ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
     232           0 :         nv_cache->md_pool = NULL;
     233           0 :         nv_cache->p2l_pool = NULL;
     234           0 :         nv_cache->chunk_md_pool = NULL;
     235           0 :         nv_cache->free_chunk_md_pool = NULL;
     236             : 
     237           0 :         free(nv_cache->chunks);
     238           0 :         nv_cache->chunks = NULL;
     239           0 : }
     240             : 
     241             : static uint64_t
     242           0 : chunk_get_free_space(struct ftl_nv_cache *nv_cache,
     243             :                      struct ftl_nv_cache_chunk *chunk)
     244             : {
     245           0 :         assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
     246             :                nv_cache->chunk_blocks);
     247           0 :         return nv_cache->chunk_blocks - chunk->md->write_pointer -
     248           0 :                nv_cache->tail_md_chunk_blocks;
     249             : }
     250             : 
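/*
 * Illustrative: with chunk_blocks = 1024, tail_md_chunk_blocks = 8 and
 * write_pointer = 1000 (assumed numbers), 1024 - 1000 - 8 = 16 data blocks
 * remain before the tail metadata region.
 */
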
     251             : static bool
     252           0 : chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
     253             : {
     254           0 :         return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
     255             : }
     256             : 
     257             : static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
     258             : 
     259             : static uint64_t
     260           0 : ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
     261             : {
     262           0 :         uint64_t address = FTL_LBA_INVALID;
     263           0 :         uint64_t num_blocks = io->num_blocks;
     264             :         uint64_t free_space;
     265             :         struct ftl_nv_cache_chunk *chunk;
     266             : 
     267             :         do {
     268           0 :                 chunk = nv_cache->chunk_current;
      269             :                 /* Chunk has been closed, so pick a new one */
      270           0 :                 if (chunk && chunk_is_closed(chunk)) {
     271           0 :                         chunk = NULL;
     272             :                 }
     273             : 
     274           0 :                 if (!chunk) {
     275           0 :                         chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
     276           0 :                         if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
     277           0 :                                 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
     278           0 :                                 nv_cache->chunk_current = chunk;
     279             :                         } else {
     280             :                                 break;
     281             :                         }
     282             :                 }
     283             : 
     284           0 :                 free_space = chunk_get_free_space(nv_cache, chunk);
     285             : 
     286           0 :                 if (free_space >= num_blocks) {
     287             :                         /* Enough space in chunk */
     288             : 
     289             :                         /* Calculate address in NV cache */
     290           0 :                         address = chunk->offset + chunk->md->write_pointer;
     291             : 
     292             :                         /* Set chunk in IO */
     293           0 :                         io->nv_cache_chunk = chunk;
     294             : 
     295             :                         /* Move write pointer */
     296           0 :                         chunk->md->write_pointer += num_blocks;
     297           0 :                         break;
     298             :                 }
     299             : 
      300             :                 /* Not enough space in the current chunk */
     301           0 :                 nv_cache->chunk_current = NULL;
     302             : 
     303           0 :                 if (0 == free_space) {
     304           0 :                         continue;
     305             :                 }
     306             : 
     307           0 :                 chunk->md->blocks_skipped = free_space;
     308           0 :                 chunk->md->blocks_written += free_space;
     309           0 :                 chunk->md->write_pointer += free_space;
     310             : 
     311           0 :                 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
     312           0 :                         ftl_chunk_close(chunk);
     313             :                 }
     314             :         } while (1);
     315             : 
     316           0 :         return address;
     317             : }
     318             : 
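/*
 * Summary of the loop above: the writer appends at the current open chunk's
 * write pointer; when a request does not fit, the remaining data area is
 * accounted as skipped, the chunk is closed once fully written, and
 * allocation retries from the open-chunk list until a chunk with enough
 * space is found or the list is exhausted (returning FTL_LBA_INVALID).
 */
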
     319             : void
     320           0 : ftl_nv_cache_fill_md(struct ftl_io *io)
     321             : {
     322           0 :         struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
     323             :         uint64_t i;
     324           0 :         union ftl_md_vss *metadata = io->md;
     325           0 :         uint64_t lba = ftl_io_get_lba(io, 0);
     326             : 
     327           0 :         for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
     328           0 :                 metadata->nv_cache.lba = lba;
     329           0 :                 metadata->nv_cache.seq_id = chunk->md->seq_id;
     330             :         }
     331           0 : }
     332             : 
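/*
 * Each cached block carries its LBA and the owning chunk's seq_id in the
 * VSS metadata written above; compaction later compares the stored seq_id
 * against the chunk's current one to detect stale blocks (see
 * compaction_process_finish_read()).
 */
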
     333             : uint64_t
     334           0 : chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
     335             : {
     336           0 :         return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
     337             : }
     338             : 
     339             : static void
     340           0 : chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
     341             :                      uint64_t advanced_blocks)
     342             : {
     343           0 :         chunk->md->blocks_written += advanced_blocks;
     344             : 
     345           0 :         assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
     346             : 
     347           0 :         if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
     348           0 :                 ftl_chunk_close(chunk);
     349             :         }
     350           0 : }
     351             : 
     352             : static uint64_t
     353           0 : chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
     354             : {
     355           0 :         return chunk->md->blocks_written - chunk->md->blocks_skipped -
     356           0 :                chunk->nv_cache->tail_md_chunk_blocks;
     357             : }
     358             : 
     359             : static bool
     360           0 : is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
     361             : {
     362           0 :         assert(chunk->md->blocks_written != 0);
     363             : 
     364           0 :         if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
     365           0 :                 return true;
     366             :         }
     367             : 
     368           0 :         return false;
     369             : }
     370             : 
     371             : static int
     372           0 : ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
     373             : {
     374           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     375           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     376           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
     377           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
     378             : 
     379           0 :         p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
     380             : 
     381           0 :         if (!p2l_map->chunk_dma_md) {
     382           0 :                 return -ENOMEM;
     383             :         }
     384             : 
     385           0 :         memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
     386           0 :         return 0;
     387             : }
     388             : 
     389             : static void
     390           0 : ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
     391             : {
     392           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
     393             : 
     394           0 :         ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
     395           0 :         p2l_map->chunk_dma_md = NULL;
     396           0 : }
     397             : 
     398             : static void
     399           0 : ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
     400             : {
     401           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     402             : 
     403             :         /* Reset chunk */
     404           0 :         memset(chunk->md, 0, sizeof(*chunk->md));
     405             : 
     406           0 :         TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
     407           0 :         nv_cache->chunk_free_persist_count++;
     408           0 : }
     409             : 
     410             : static int
     411           0 : ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
     412             : {
     413           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     414           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     415           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
     416           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
     417             : 
     418           0 :         p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
     419             : 
     420           0 :         if (!p2l_map->chunk_dma_md) {
     421           0 :                 return -ENOMEM;
     422             :         }
     423             : 
     424           0 :         memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
     425           0 :         return 0;
     426             : }
     427             : 
     428             : static void
     429           0 : ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
     430             : {
     431           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
     432             : 
     433           0 :         ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
     434           0 :         p2l_map->chunk_dma_md = NULL;
     435           0 : }
     436             : 
     437             : static void
     438           0 : chunk_free_cb(int status, void *ctx)
     439             : {
     440           0 :         struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
     441             : 
     442           0 :         if (spdk_likely(!status)) {
     443           0 :                 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     444             : 
     445           0 :                 nv_cache->chunk_free_persist_count--;
     446           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
     447           0 :                 nv_cache->chunk_free_count++;
     448           0 :                 nv_cache->chunk_full_count--;
     449           0 :                 chunk->md->state = FTL_CHUNK_STATE_FREE;
     450           0 :                 chunk->md->close_seq_id = 0;
     451           0 :                 ftl_chunk_free_chunk_free_entry(chunk);
     452             :         } else {
     453             : #ifdef SPDK_FTL_RETRY_ON_ERROR
     454             :                 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
     455             : #else
     456           0 :                 ftl_abort();
     457             : #endif
     458             :         }
     459           0 : }
     460             : 
     461             : static void
     462           0 : ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
     463             : {
     464             :         int rc;
     465           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     466             :         struct ftl_p2l_map *p2l_map;
     467           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
     468           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
     469           0 :         struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
     470             : 
     471           0 :         TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
     472           0 :                 p2l_map = &chunk->p2l_map;
     473           0 :                 rc = ftl_chunk_alloc_chunk_free_entry(chunk);
     474           0 :                 if (rc) {
     475           0 :                         break;
     476             :                 }
     477             : 
     478           0 :                 TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
     479             : 
     480           0 :                 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
     481           0 :                 p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
     482           0 :                 p2l_map->chunk_dma_md->close_seq_id = 0;
     483           0 :                 p2l_map->chunk_dma_md->p2l_map_checksum = 0;
     484             : 
     485           0 :                 ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
     486             :                                      chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
     487             :         }
     488           0 : }
     489             : 
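/*
 * Note on the loop above: if allocating the DMA-able metadata entry fails,
 * the loop breaks before TAILQ_REMOVE(), so the chunk stays on
 * needs_free_persist_list and persistence is retried on a later call.
 */
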
     490             : static void
     491           0 : compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
     492             : {
     493           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     494           0 :         struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
     495             :         double *ptr;
     496             : 
     497           0 :         if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
     498           0 :                 return;
     499             :         }
     500             : 
     501           0 :         if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
     502           0 :                 ptr = compaction_bw->buf + compaction_bw->first;
     503           0 :                 compaction_bw->first++;
     504           0 :                 if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
     505           0 :                         compaction_bw->first = 0;
     506             :                 }
     507           0 :                 compaction_bw->sum -= *ptr;
     508             :         } else {
     509           0 :                 ptr = compaction_bw->buf + compaction_bw->count;
     510           0 :                 compaction_bw->count++;
     511             :         }
     512             : 
     513           0 :         *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
     514           0 :         chunk->compaction_length_tsc = 0;
     515             : 
     516           0 :         compaction_bw->sum += *ptr;
     517           0 :         nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
     518             : }
     519             : 
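/*
 * The compaction bandwidth estimate above is a simple moving average kept
 * in a fixed-size ring buffer: "first" points at the oldest sample once the
 * window is full, and "sum" tracks the window total. A minimal standalone
 * sketch of the same scheme (illustrative; SMA_WINDOW and sma_push() are
 * hypothetical names, not part of this file):
 */
#define SMA_WINDOW 32

struct sma {
        double buf[SMA_WINDOW];
        double sum;
        size_t count;   /* samples stored so far, <= SMA_WINDOW */
        size_t first;   /* index of the oldest sample once full */
};

static double
sma_push(struct sma *s, double sample)
{
        double *slot;

        if (s->count == SMA_WINDOW) {
                /* Window full: evict the oldest sample from the sum. */
                slot = &s->buf[s->first];
                if (++s->first == SMA_WINDOW) {
                        s->first = 0;
                }
                s->sum -= *slot;
        } else {
                slot = &s->buf[s->count++];
        }

        *slot = sample;
        s->sum += sample;
        return s->sum / s->count;
}
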
     520             : static void
     521           0 : chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
     522             : {
     523           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
     524           0 :         uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
     525             : 
     526           0 :         chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
     527           0 :         chunk->compaction_start_tsc = tsc;
     528             : 
     529           0 :         chunk->md->blocks_compacted += num_blocks;
     530           0 :         assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
     531           0 :         if (!is_chunk_compacted(chunk)) {
     532           0 :                 return;
     533             :         }
     534             : 
     535             :         /* Remove chunk from compacted list */
     536           0 :         TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
     537           0 :         nv_cache->chunk_comp_count--;
     538             : 
     539           0 :         compaction_stats_update(chunk);
     540             : 
     541           0 :         ftl_chunk_free(chunk);
     542             : }
     543             : 
     544             : static bool
     545           0 : is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
     546             : {
     547           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     548             : 
     549           0 :         if (dev->conf.prep_upgrade_on_shutdown) {
     550           0 :                 if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
     551           0 :                         return true;
     552             :                 }
     553             :         }
     554             : 
     555           0 :         return false;
     556             : }
     557             : 
     558             : static bool
     559           0 : is_compaction_required(struct ftl_nv_cache *nv_cache)
     560             : {
     561           0 :         if (spdk_unlikely(nv_cache->halt)) {
     562           0 :                 return is_compaction_required_for_upgrade(nv_cache);
     563             :         }
     564             : 
     565           0 :         if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
     566           0 :                 return true;
     567             :         }
     568             : 
     569           0 :         return false;
     570             : }
     571             : 
     572             : static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
     573             : static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
     574             : 
     575             : static void
     576           0 : _compaction_process_pin_lba(void *_comp)
     577             : {
     578           0 :         struct ftl_nv_cache_compactor *comp = _comp;
     579             : 
     580           0 :         compaction_process_pin_lba(comp);
     581           0 : }
     582             : 
     583             : static void
     584           0 : compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
     585             : {
     586           0 :         struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
     587           0 :         struct ftl_rq *rq = comp->rq;
     588             : 
     589           0 :         if (status) {
     590           0 :                 rq->iter.status = status;
     591           0 :                 pin_ctx->lba = FTL_LBA_INVALID;
     592             :         }
     593             : 
     594           0 :         if (--rq->iter.remaining == 0) {
     595           0 :                 if (rq->iter.status) {
     596             :                         /* unpin and try again */
     597           0 :                         ftl_rq_unpin(rq);
     598           0 :                         spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
     599           0 :                         return;
     600             :                 }
     601             : 
     602           0 :                 compaction_process_finish_read(comp);
     603             :         }
     604             : }
     605             : 
     606             : static void
     607           0 : compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
     608             : {
     609           0 :         struct ftl_rq *rq = comp->rq;
     610           0 :         struct spdk_ftl_dev *dev = rq->dev;
     611             :         struct ftl_rq_entry *entry;
     612             : 
     613           0 :         assert(rq->iter.count);
     614           0 :         rq->iter.remaining = rq->iter.count;
     615           0 :         rq->iter.status = 0;
     616             : 
     617           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
     618           0 :                 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
     619           0 :                 struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
     620           0 :                 union ftl_md_vss *md = entry->io_md;
     621             : 
     622           0 :                 if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
     623           0 :                         ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
     624             :                 } else {
     625           0 :                         ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
     626             :                 }
     627             :         }
     628           0 : }
     629             : 
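/*
 * Pinning above fans out one L2P pin (or pin-skip) per request entry and
 * fans back in through rq->iter.remaining; the last completion either
 * unpins everything and retries the whole batch (if any pin failed) or
 * moves on to compaction_process_finish_read().
 */
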
     630             : static void
     631           0 : compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
     632             : {
     633           0 :         struct ftl_rq_entry *entry = arg;
     634           0 :         struct ftl_rq *rq = ftl_rq_from_entry(entry);
     635           0 :         struct spdk_ftl_dev *dev = rq->dev;
     636           0 :         struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
     637             : 
     638           0 :         ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
     639             : 
     640           0 :         spdk_bdev_free_io(bdev_io);
     641             : 
     642           0 :         if (!success) {
     643             :                 /* retry */
     644           0 :                 spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
     645           0 :                 return;
     646             :         }
     647             : 
     648           0 :         assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
     649           0 :         rq->iter.remaining -= entry->bdev_io.num_blocks;
     650           0 :         if (0 == rq->iter.remaining) {
      651             :                 /* All IOs processed, go to next phase - pinning */
     652           0 :                 compaction_process_pin_lba(compactor);
     653             :         }
     654             : }
     655             : 
     656             : static void
     657           0 : compaction_process_read_entry(void *arg)
     658             : {
     659           0 :         struct ftl_rq_entry *entry = arg;
     660           0 :         struct ftl_rq *rq = ftl_rq_from_entry(entry);
     661           0 :         struct spdk_ftl_dev *dev = rq->dev;
     662             : 
     663           0 :         int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
     664             :                         dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
     665             :                         entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
     666             :                         compaction_process_read_entry_cb, entry);
     667             : 
     668           0 :         if (spdk_unlikely(rc)) {
     669           0 :                 if (rc == -ENOMEM) {
     670           0 :                         struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
     671           0 :                         entry->bdev_io.wait_entry.bdev = bdev;
     672           0 :                         entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
     673           0 :                         entry->bdev_io.wait_entry.cb_arg = entry;
     674           0 :                         spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
     675             :                 } else {
     676           0 :                         ftl_abort();
     677             :                 }
     678             :         }
     679             : 
     680           0 :         dev->stats.io_activity_total += entry->bdev_io.num_blocks;
     681           0 : }
     682             : 
     683             : static bool
     684           0 : is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
     685             : {
     686           0 :         assert(chunk->md->blocks_written != 0);
     687             : 
     688           0 :         if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
     689           0 :                 return false;
     690             :         }
     691             : 
     692           0 :         return true;
     693             : }
     694             : 
     695             : static struct ftl_nv_cache_chunk *
     696           0 : get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
     697             : {
     698           0 :         struct ftl_nv_cache_chunk *chunk = NULL;
     699             : 
     700           0 :         if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
     701           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
     702           0 :                 if (is_chunk_to_read(chunk)) {
     703           0 :                         return chunk;
     704             :                 }
     705             :         }
     706             : 
     707           0 :         if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
     708           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
     709           0 :                 TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
     710             : 
     711           0 :                 assert(chunk->md->write_pointer);
     712             :         } else {
     713           0 :                 return NULL;
     714             :         }
     715             : 
     716           0 :         if (spdk_likely(chunk)) {
     717           0 :                 assert(chunk->md->write_pointer != 0);
     718           0 :                 TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
     719           0 :                 nv_cache->chunk_comp_count++;
     720             :         }
     721             : 
     722           0 :         return chunk;
     723             : }
     724             : 
     725             : static uint64_t
     726           0 : chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
     727             : {
     728             :         uint64_t blocks_written;
     729             :         uint64_t blocks_to_read;
     730             : 
     731           0 :         assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
     732           0 :         blocks_written = chunk_user_blocks_written(chunk);
     733             : 
     734           0 :         assert(blocks_written >= chunk->md->read_pointer);
     735           0 :         blocks_to_read = blocks_written - chunk->md->read_pointer;
     736             : 
     737           0 :         return blocks_to_read;
     738             : }
     739             : 
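/*
 * Illustrative: with 900 user blocks written and read_pointer = 600
 * (assumed numbers), 300 blocks remain to be scanned for compaction.
 */
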
     740             : static void
     741           0 : compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
     742             : {
     743           0 :         struct ftl_nv_cache *nv_cache = compactor->nv_cache;
     744             : 
     745           0 :         compactor->rq->iter.count = 0;
     746           0 :         assert(nv_cache->compaction_active_count);
     747           0 :         nv_cache->compaction_active_count--;
     748           0 :         TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
     749           0 : }
     750             : 
     751             : static void
     752           0 : compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
     753             : {
     754           0 :         entry->addr = FTL_ADDR_INVALID;
     755           0 :         entry->lba = FTL_LBA_INVALID;
     756           0 :         entry->seq_id = 0;
     757           0 :         entry->owner.priv = NULL;
     758           0 : }
     759             : 
     760             : static void
     761           0 : compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
     762             : {
     763           0 :         struct ftl_rq *rq = compactor->rq;
     764             :         struct ftl_rq_entry *entry;
     765             : 
     766           0 :         assert(idx < rq->num_blocks);
     767           0 :         FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
     768           0 :                 compaction_process_invalidate_entry(entry);
     769             :         }
     770           0 : }
     771             : 
     772             : static void
     773           0 : compaction_process_read(struct ftl_nv_cache_compactor *compactor)
     774             : {
     775           0 :         struct ftl_rq *rq = compactor->rq;
     776           0 :         struct ftl_nv_cache *nv_cache = compactor->nv_cache;
     777           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     778             :         struct ftl_rq_entry *entry, *io;
     779             : 
     780           0 :         assert(rq->iter.count);
     781           0 :         rq->iter.remaining = rq->iter.count;
     782             : 
     783           0 :         io = rq->entries;
     784           0 :         io->bdev_io.num_blocks = 1;
     785           0 :         io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
      786           0 :         FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
     787           0 :                 if (entry->addr == io->addr + io->bdev_io.num_blocks) {
     788           0 :                         io->bdev_io.num_blocks++;
     789             :                 } else {
     790           0 :                         compaction_process_read_entry(io);
     791           0 :                         io = entry;
     792           0 :                         io->bdev_io.num_blocks = 1;
     793           0 :                         io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
     794             :                 }
     795             :         }
     796           0 :         compaction_process_read_entry(io);
     797           0 : }
     798             : 
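/*
 * compaction_process_read() above merges entries whose cache addresses are
 * contiguous into single bdev reads. A minimal standalone model of the
 * run-coalescing over a plain array of addresses (illustrative;
 * issue_read() is a hypothetical stand-in for
 * compaction_process_read_entry()):
 */
static void
coalesce_reads(const uint64_t *addr, size_t n,
               void (*issue_read)(uint64_t offset_blocks, uint64_t num_blocks))
{
        uint64_t start = addr[0];       /* assumes n >= 1, as the caller asserts */
        uint64_t len = 1;
        size_t i;

        for (i = 1; i < n; i++) {
                if (addr[i] == start + len) {
                        len++;          /* extends the current contiguous run */
                } else {
                        issue_read(start, len);
                        start = addr[i];
                        len = 1;
                }
        }
        issue_read(start, len);
}
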
     799             : static ftl_addr
     800           0 : compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
     801             : {
     802             :         ftl_addr start, pos;
     803           0 :         uint64_t skip, to_read = chunk_blocks_to_read(chunk);
     804             : 
     805           0 :         if (0 == to_read) {
     806           0 :                 return FTL_ADDR_INVALID;
     807             :         }
     808             : 
     809           0 :         start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
     810           0 :         pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
     811             : 
     812           0 :         if (pos == UINT64_MAX) {
     813           0 :                 chunk->md->read_pointer += to_read;
     814           0 :                 chunk_compaction_advance(chunk, to_read);
     815           0 :                 return FTL_ADDR_INVALID;
     816             :         }
     817             : 
     818           0 :         assert(pos >= start);
     819           0 :         skip = pos - start;
     820           0 :         if (skip) {
     821           0 :                 chunk->md->read_pointer += skip;
     822           0 :                 chunk_compaction_advance(chunk, skip);
     823             :         }
     824             : 
     825           0 :         return pos;
     826             : }
     827             : 
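/*
 * Illustrative: if the window [start, start + to_read) has its first set
 * (valid) bit at start + 5, the five leading invalid blocks are accounted
 * as compacted immediately and reading resumes at start + 5; a window with
 * no set bits advances the read pointer past the whole range.
 */
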
     828             : static bool
     829           0 : compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
     830             : {
     831           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     832           0 :         struct ftl_nv_cache_chunk *chunk = NULL;
     833           0 :         ftl_addr addr = FTL_ADDR_INVALID;
     834             : 
     835           0 :         while (!chunk) {
     836             :                 /* Get currently handled chunk */
     837           0 :                 chunk = get_chunk_for_compaction(nv_cache);
     838           0 :                 if (!chunk) {
     839           0 :                         return false;
     840             :                 }
     841           0 :                 chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
     842             : 
     843             :                 /* Get next read position in chunk */
     844           0 :                 addr = compaction_chunk_read_pos(dev, chunk);
     845           0 :                 if (FTL_ADDR_INVALID == addr) {
     846           0 :                         chunk = NULL;
     847             :                 }
     848             :         }
     849             : 
     850           0 :         assert(FTL_ADDR_INVALID != addr);
     851             : 
     852             :         /* Set entry address info and chunk */
     853           0 :         entry->addr = addr;
     854           0 :         entry->owner.priv = chunk;
     855             : 
     856             :         /* Move read pointer in the chunk */
     857           0 :         chunk->md->read_pointer++;
     858             : 
     859           0 :         return true;
     860             : }
     861             : 
     862             : static void
     863           0 : compaction_process_start(struct ftl_nv_cache_compactor *compactor)
     864             : {
     865           0 :         struct ftl_rq *rq = compactor->rq;
     866           0 :         struct ftl_nv_cache *nv_cache = compactor->nv_cache;
     867             :         struct ftl_rq_entry *entry;
     868             : 
     869           0 :         assert(0 == compactor->rq->iter.count);
     870           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
     871           0 :                 if (!compaction_entry_read_pos(nv_cache, entry)) {
     872           0 :                         compaction_process_pad(compactor, entry->index);
     873           0 :                         break;
     874             :                 }
     875           0 :                 rq->iter.count++;
     876             :         }
     877             : 
     878           0 :         if (rq->iter.count) {
     879             :                 /* Schedule Read IOs */
     880           0 :                 compaction_process_read(compactor);
     881             :         } else {
     882           0 :                 compactor_deactivate(compactor);
     883             :         }
     884           0 : }
     885             : 
     886             : static void
     887           0 : compaction_process(struct ftl_nv_cache *nv_cache)
     888             : {
     889           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
     890             :         struct ftl_nv_cache_compactor *compactor;
     891             : 
     892           0 :         if (!is_compaction_required(nv_cache)) {
     893           0 :                 return;
     894             :         }
     895             : 
     896           0 :         compactor = TAILQ_FIRST(&nv_cache->compactor_list);
     897           0 :         if (!compactor) {
     898           0 :                 return;
     899             :         }
     900             : 
     901           0 :         TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
     902           0 :         compactor->nv_cache->compaction_active_count++;
     903           0 :         compaction_process_start(compactor);
     904           0 :         ftl_add_io_activity(dev);
     905             : }
     906             : 
     907             : static void
     908           0 : compaction_process_ftl_done(struct ftl_rq *rq)
     909             : {
     910           0 :         struct spdk_ftl_dev *dev = rq->dev;
     911           0 :         struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
     912           0 :         struct ftl_band *band = rq->io.band;
     913             :         struct ftl_rq_entry *entry;
     914             :         ftl_addr addr;
     915             : 
     916           0 :         if (spdk_unlikely(false == rq->success)) {
      917             :                 /* IO error, retry writing */
     918             : #ifdef SPDK_FTL_RETRY_ON_ERROR
     919             :                 ftl_writer_queue_rq(&dev->writer_user, rq);
     920             :                 return;
     921             : #else
     922           0 :                 ftl_abort();
     923             : #endif
     924             :         }
     925             : 
     926           0 :         assert(rq->iter.count);
     927             : 
     928             :         /* Update L2P table */
     929           0 :         addr = rq->io.addr;
     930           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
     931           0 :                 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
     932             : 
     933           0 :                 if (entry->lba != FTL_LBA_INVALID) {
     934           0 :                         ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
     935           0 :                         ftl_l2p_unpin(dev, entry->lba, 1);
     936           0 :                         chunk_compaction_advance(chunk, 1);
     937             :                 } else {
     938           0 :                         assert(entry->addr == FTL_ADDR_INVALID);
     939             :                 }
     940             : 
     941           0 :                 addr = ftl_band_next_addr(band, addr, 1);
     942           0 :                 compaction_process_invalidate_entry(entry);
     943             :         }
     944             : 
     945           0 :         compactor_deactivate(compactor);
     946           0 : }
     947             : 
     948             : static void
     949           0 : compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
     950             : {
     951           0 :         struct ftl_rq *rq = compactor->rq;
     952           0 :         struct spdk_ftl_dev *dev = rq->dev;
     953             :         struct ftl_rq_entry *entry;
     954             :         ftl_addr current_addr;
     955           0 :         uint64_t skip = 0;
     956             : 
     957           0 :         FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
     958           0 :                 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
     959           0 :                 union ftl_md_vss *md = entry->io_md;
     960             : 
     961           0 :                 if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
     962           0 :                         skip++;
     963           0 :                         compaction_process_invalidate_entry(entry);
     964           0 :                         chunk_compaction_advance(chunk, 1);
     965           0 :                         continue;
     966             :                 }
     967             : 
     968           0 :                 current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
     969           0 :                 if (current_addr == entry->addr) {
     970           0 :                         entry->lba = md->nv_cache.lba;
     971           0 :                         entry->seq_id = chunk->md->seq_id;
     972             :                 } else {
      973             :                         /* This address was already invalidated, so skip this block */
     974           0 :                         chunk_compaction_advance(chunk, 1);
     975           0 :                         ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
     976           0 :                         compaction_process_invalidate_entry(entry);
     977           0 :                         skip++;
     978             :                 }
     979             :         }
     980             : 
     981           0 :         if (skip < rq->iter.count) {
     982             :                 /*
     983             :                  * Request contains data to be placed on FTL, compact it
     984             :                  */
     985           0 :                 ftl_writer_queue_rq(&dev->writer_user, rq);
     986             :         } else {
     987           0 :                 compactor_deactivate(compactor);
     988             :         }
     989           0 : }
     990             : 
     991             : static void
     992           0 : compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
     993             : {
     994           0 :         if (!compactor) {
     995           0 :                 return;
     996             :         }
     997             : 
     998           0 :         ftl_rq_del(compactor->rq);
     999           0 :         free(compactor);
    1000             : }
    1001             : 
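                      : /*
                      :  * Allocate a compactor together with its helper read request; every
                      :  * request entry starts out invalidated. On any allocation failure the
                      :  * partially constructed compactor is torn down and NULL is returned.
                      :  */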
    1002             : static struct ftl_nv_cache_compactor *
    1003           0 : compactor_alloc(struct spdk_ftl_dev *dev)
    1004             : {
    1005             :         struct ftl_nv_cache_compactor *compactor;
    1006             :         struct ftl_rq_entry *entry;
    1007             : 
    1008           0 :         compactor = calloc(1, sizeof(*compactor));
    1009           0 :         if (!compactor) {
    1010           0 :                 goto error;
    1011             :         }
    1012             : 
     1013             :         /* Allocate a helper request for reading */
    1014           0 :         compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
    1015           0 :         if (!compactor->rq) {
    1016           0 :                 goto error;
    1017             :         }
    1018             : 
    1019           0 :         compactor->nv_cache = &dev->nv_cache;
    1020           0 :         compactor->rq->owner.priv = compactor;
    1021           0 :         compactor->rq->owner.cb = compaction_process_ftl_done;
    1022           0 :         compactor->rq->owner.compaction = true;
    1023             : 
    1024           0 :         FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
    1025           0 :                 compaction_process_invalidate_entry(entry);
    1026             :         }
    1027             : 
    1028           0 :         return compactor;
    1029             : 
    1030           0 : error:
    1031           0 :         compactor_free(dev, compactor);
    1032           0 :         return NULL;
    1033             : }
    1034             : 
    1035             : static void
    1036           0 : ftl_nv_cache_submit_cb_done(struct ftl_io *io)
    1037             : {
    1038           0 :         struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
    1039             : 
    1040           0 :         chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
    1041           0 :         io->nv_cache_chunk = NULL;
    1042             : 
    1043           0 :         ftl_mempool_put(nv_cache->md_pool, io->md);
    1044           0 :         ftl_io_complete(io);
    1045           0 : }
    1046             : 
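                      : /*
                      :  * Publish the cache addresses of a finished write in the L2P. io->map
                      :  * holds the mappings captured at pin time, letting the L2P resolve
                      :  * outstanding write-after-write conflicts, after which the LBA range
                      :  * is unpinned and the I/O completed.
                      :  */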
    1047             : static void
    1048           0 : ftl_nv_cache_l2p_update(struct ftl_io *io)
    1049             : {
    1050           0 :         struct spdk_ftl_dev *dev = io->dev;
    1051           0 :         ftl_addr next_addr = io->addr;
    1052             :         size_t i;
    1053             : 
    1054           0 :         for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
    1055           0 :                 ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
    1056             :         }
    1057             : 
    1058           0 :         ftl_l2p_unpin(dev, io->lba, io->num_blocks);
    1059           0 :         ftl_nv_cache_submit_cb_done(io);
    1060           0 : }
    1061             : 
    1062             : static void
    1063           0 : ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    1064             : {
    1065           0 :         struct ftl_io *io = cb_arg;
    1066             : 
    1067           0 :         ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
    1068             : 
    1069           0 :         spdk_bdev_free_io(bdev_io);
    1070             : 
    1071           0 :         if (spdk_unlikely(!success)) {
    1072           0 :                 FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
    1073             :                            io->addr);
    1074           0 :                 io->status = -EIO;
    1075           0 :                 ftl_nv_cache_submit_cb_done(io);
    1076             :         } else {
    1077           0 :                 ftl_nv_cache_l2p_update(io);
    1078             :         }
    1079           0 : }
    1080             : 
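                      : /*
                      :  * Submit the buffered write to the cache bdev. On -ENOMEM the request
                      :  * is parked on the bdev io_wait queue and resubmitted from this very
                      :  * function; any other submission error is treated as fatal.
                      :  */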
    1081             : static void
    1082           0 : nv_cache_write(void *_io)
    1083             : {
    1084           0 :         struct ftl_io *io = _io;
    1085           0 :         struct spdk_ftl_dev *dev = io->dev;
    1086           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1087             :         int rc;
    1088             : 
    1089           0 :         rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
    1090           0 :                                              io->iov, io->iov_cnt, io->md,
    1091             :                                              ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
    1092             :                                              ftl_nv_cache_submit_cb, io);
    1093           0 :         if (spdk_unlikely(rc)) {
    1094           0 :                 if (rc == -ENOMEM) {
    1095           0 :                         struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
    1096           0 :                         io->bdev_io_wait.bdev = bdev;
    1097           0 :                         io->bdev_io_wait.cb_fn = nv_cache_write;
    1098           0 :                         io->bdev_io_wait.cb_arg = io;
    1099           0 :                         spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
    1100             :                 } else {
    1101           0 :                         ftl_abort();
    1102             :                 }
    1103             :         }
    1104           0 : }
    1105             : 
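                      : /*
                      :  * L2P pin completion: on failure the I/O finishes with -EAGAIN so the
                      :  * upper layer can retry; on success the current mappings are
                      :  * snapshotted into io->map (for write-after-write resolution) and the
                      :  * bdev write is started.
                      :  */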
    1106             : static void
    1107           0 : ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
    1108             : {
    1109           0 :         struct ftl_io *io = pin_ctx->cb_ctx;
    1110             :         size_t i;
    1111             : 
    1112           0 :         if (spdk_unlikely(status != 0)) {
     1113             :                 /* Internal L2P fault; finish with -EAGAIN so the request is retried */
     1114           0 :         FTL_ERRLOG(dev, "Cannot pin LBA for NV cache write at %"PRIx64"\n",
    1115             :                            io->addr);
    1116           0 :                 io->status = -EAGAIN;
    1117           0 :                 ftl_nv_cache_submit_cb_done(io);
    1118           0 :                 return;
    1119             :         }
    1120             : 
    1121             :         /* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
    1122           0 :         for (i = 0; i < io->num_blocks; ++i) {
    1123           0 :                 io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
    1124             :         }
    1125             : 
    1126           0 :         assert(io->iov_pos == 0);
    1127             : 
    1128           0 :         ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
    1129             : 
    1130           0 :         nv_cache_write(io);
    1131             : }
    1132             : 
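                      : /*
                      :  * User-write entry point. Returns false (caller resubmits) when the
                      :  * metadata pool or the write buffer is exhausted; otherwise it pins
                      :  * the LBA range and lets the pin callback issue the actual write,
                      :  * while the submitted blocks are charged against the throttle budget.
                      :  */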
    1133             : bool
    1134           0 : ftl_nv_cache_write(struct ftl_io *io)
    1135             : {
    1136           0 :         struct spdk_ftl_dev *dev = io->dev;
    1137             :         uint64_t cache_offset;
    1138             : 
    1139           0 :         io->md = ftl_mempool_get(dev->nv_cache.md_pool);
    1140           0 :         if (spdk_unlikely(!io->md)) {
    1141           0 :                 return false;
    1142             :         }
    1143             : 
     1144             :         /* Reserve space in the write buffer cache */
    1145           0 :         cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
    1146           0 :         if (cache_offset == FTL_LBA_INVALID) {
    1147             :                 /* No free space in NV cache, resubmit request */
    1148           0 :                 ftl_mempool_put(dev->nv_cache.md_pool, io->md);
    1149           0 :                 return false;
    1150             :         }
    1151           0 :         io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
    1152           0 :         io->nv_cache_chunk = dev->nv_cache.chunk_current;
    1153             : 
    1154           0 :         ftl_nv_cache_fill_md(io);
    1155           0 :         ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
    1156             :                     ftl_nv_cache_pin_cb, io,
    1157             :                     &io->l2p_pin_ctx);
    1158             : 
    1159           0 :         dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
    1160             : 
    1161           0 :         return true;
    1162             : }
    1163             : 
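                      : /*
                      :  * Read user data directly from the cache bdev; the caller must ensure
                      :  * the address resolves into the NV cache.
                      :  */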
    1164             : int
    1165           0 : ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
    1166             :                   spdk_bdev_io_completion_cb cb, void *cb_arg)
    1167             : {
    1168             :         int rc;
    1169           0 :         struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
    1170             : 
    1171           0 :         assert(ftl_addr_in_nvc(io->dev, addr));
    1172             : 
    1173           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1174           0 :                         ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
    1175             :                         num_blocks, cb, cb_arg);
    1176             : 
    1177           0 :         return rc;
    1178             : }
    1179             : 
    1180             : bool
    1181           0 : ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
    1182             : {
    1183           0 :         if (nv_cache->compaction_active_count) {
    1184           0 :                 return false;
    1185             :         }
    1186             : 
    1187           0 :         if (nv_cache->chunk_open_count > 0) {
    1188           0 :                 return false;
    1189             :         }
    1190             : 
    1191           0 :         if (is_compaction_required_for_upgrade(nv_cache)) {
    1192           0 :                 return false;
    1193             :         }
    1194             : 
    1195           0 :         return true;
    1196             : }
    1197             : 
    1198             : void
    1199           0 : ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
    1200             :                       uint64_t offset, uint64_t lba)
    1201             : {
    1202           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1203           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1204             : 
    1205           0 :         ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
    1206           0 : }
    1207             : 
    1208             : uint64_t
    1209           0 : ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
    1210             : {
    1211           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1212           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1213             : 
    1214           0 :         return ftl_lba_load(dev, p2l_map->chunk_map, offset);
    1215             : }
    1216             : 
    1217             : static void
    1218           0 : ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
    1219             : {
    1220           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1221           0 :         uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
    1222             :         uint64_t offset;
    1223             : 
    1224           0 :         offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
    1225           0 :         ftl_chunk_map_set_lba(chunk, offset, lba);
    1226           0 : }
    1227             : 
    1228             : struct ftl_nv_cache_chunk *
    1229           0 : ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
    1230             : {
    1231           0 :         struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
    1232             :         uint64_t chunk_idx;
    1233           0 :         uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
    1234             : 
    1235           0 :         assert(chunk != NULL);
    1236           0 :         chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
    1237           0 :         chunk += chunk_idx;
    1238             : 
    1239           0 :         return chunk;
    1240             : }
    1241             : 
    1242             : void
    1243           0 : ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
    1244             : {
    1245             :         struct ftl_nv_cache_chunk *chunk;
    1246             : 
    1247           0 :         chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
    1248             : 
    1249           0 :         assert(lba != FTL_LBA_INVALID);
    1250             : 
    1251           0 :         ftl_chunk_set_addr(chunk, lba, addr);
    1252           0 :         ftl_bitmap_set(dev->valid_map, addr);
    1253           0 : }
    1254             : 
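                      : /*
                      :  * Proportional throttle update: the error term is the free-chunk
                      :  * surplus over the target, normalized by the total chunk count, and
                      :  * the clamped modifier scales the per-interval block budget derived
                      :  * from the compaction bandwidth moving average. Without active
                      :  * compaction data the limit is effectively lifted.
                      :  */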
    1255             : static void
    1256           0 : ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
    1257             : {
    1258             :         double err;
    1259             :         double modifier;
    1260             : 
    1261           0 :         err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
    1262           0 :         modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
    1263             : 
    1264           0 :         if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
    1265           0 :                 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
    1266           0 :         } else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
    1267           0 :                 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
    1268             :         }
    1269             : 
    1270           0 :         if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
    1271           0 :                 nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
    1272             :         } else {
    1273           0 :                 double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
    1274             :                                              FTL_BLOCK_SIZE;
    1275           0 :                 nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
    1276             :         }
    1277           0 : }
    1278             : 
    1279             : static void
    1280           0 : ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
    1281             : {
    1282           0 :         uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
    1283             : 
    1284           0 :         if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
    1285           0 :                 nv_cache->throttle.start_tsc = tsc;
    1286           0 :         } else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
    1287           0 :                 ftl_nv_cache_throttle_update(nv_cache);
    1288           0 :                 nv_cache->throttle.start_tsc = tsc;
    1289           0 :                 nv_cache->throttle.blocks_submitted = 0;
    1290             :         }
    1291           0 : }
    1292             : 
    1293             : static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
    1294             : 
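                      : /*
                      :  * Periodic NV cache housekeeping: opens a new chunk from the free
                      :  * list while under FTL_MAX_OPEN_CHUNKS (unless halted), then drives
                      :  * compaction, persists chunk free-state metadata and refreshes the
                      :  * throttle.
                      :  */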
    1295             : void
    1296           0 : ftl_nv_cache_process(struct spdk_ftl_dev *dev)
    1297             : {
    1298           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1299             : 
    1300           0 :         assert(dev->nv_cache.bdev_desc);
    1301             : 
    1302           0 :         if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
    1303           0 :             !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
    1304           0 :                 struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
    1305           0 :                 TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
    1306           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
    1307           0 :                 nv_cache->chunk_free_count--;
    1308           0 :                 chunk->md->seq_id = ftl_get_next_seq_id(dev);
    1309           0 :                 ftl_chunk_open(chunk);
    1310           0 :                 ftl_add_io_activity(dev);
    1311             :         }
    1312             : 
    1313           0 :         compaction_process(nv_cache);
    1314           0 :         ftl_chunk_persist_free_state(nv_cache);
    1315           0 :         ftl_nv_cache_process_throttle(nv_cache);
    1316           0 : }
    1317             : 
    1318             : static bool
    1319           0 : ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
    1320             : {
    1321           0 :         if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
    1322           0 :                 return true;
    1323             :         } else {
    1324           0 :                 return false;
    1325             :         }
    1326             : }
    1327             : 
    1328             : bool
    1329           0 : ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
    1330             : {
    1331           0 :         struct ftl_nv_cache *nv_cache = &dev->nv_cache;
    1332             : 
    1333           0 :         if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
    1334           0 :             ftl_nv_cache_full(nv_cache)) {
    1335           0 :                 return true;
    1336             :         }
    1337             : 
    1338           0 :         return false;
    1339             : }
    1340             : 
    1341             : static void
    1342           0 : chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
    1343             : {
    1344             : 
    1345           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1346           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1347             : 
    1348           0 :         ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
    1349           0 :         p2l_map->chunk_map = NULL;
    1350             : 
    1351           0 :         ftl_chunk_free_md_entry(chunk);
    1352           0 : }
    1353             : 
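                      : /*
                      :  * Validate chunk metadata before it is persisted at shutdown: only
                      :  * full or empty chunks are legal here. A full chunk caught
                      :  * mid-compaction has its read pointer and compacted-block count
                      :  * rewound so compaction restarts from scratch after the next load.
                      :  */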
    1354             : int
    1355           0 : ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
    1356             : {
    1357           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1358             :         struct ftl_nv_cache_chunk *chunk;
    1359           0 :         int status = 0;
    1360             :         uint64_t i;
    1361             : 
    1362           0 :         assert(nv_cache->chunk_open_count == 0);
    1363             : 
    1364           0 :         if (nv_cache->compaction_active_count) {
    1365           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
    1366           0 :                 return -EINVAL;
    1367             :         }
    1368             : 
    1369           0 :         chunk = nv_cache->chunks;
    1370           0 :         if (!chunk) {
    1371           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
    1372           0 :                 return -ENOMEM;
    1373             :         }
    1374             : 
    1375           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1376           0 :                 nvc_validate_md(nv_cache, chunk->md);
    1377             : 
    1378           0 :                 if (chunk->md->read_pointer)  {
    1379             :                         /* Only full chunks can be compacted */
    1380           0 :                         if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
    1381           0 :                                 assert(0);
    1382             :                                 status = -EINVAL;
    1383             :                                 break;
    1384             :                         }
    1385             : 
    1386             :                         /*
    1387             :                          * Chunk in the middle of compaction, start over after
    1388             :                          * load
    1389             :                          */
    1390           0 :                         chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
    1391           0 :                 } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
    1392             :                         /* Full chunk */
    1393           0 :                 } else if (0 == chunk->md->blocks_written) {
    1394             :                         /* Empty chunk */
    1395             :                 } else {
    1396           0 :                         assert(0);
    1397             :                         status = -EINVAL;
    1398             :                         break;
    1399             :                 }
    1400             :         }
    1401             : 
    1402           0 :         if (status) {
     1403           0 :                 FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
     1404             :                            "metadata\n");
    1405             :         }
    1406             : 
    1407           0 :         return status;
    1408             : }
    1409             : 
    1410             : static int
    1411           0 : sort_chunks_cmp(const void *a, const void *b)
    1412             : {
    1413           0 :         struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
    1414           0 :         struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
    1415             : 
    1416           0 :         return a_chunk->md->seq_id - b_chunk->md->seq_id;
    1417             : }
    1418             : 
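                      : /*
                      :  * Rebuild chunk_full_list in ascending seq_id order so the oldest
                      :  * data is compacted first after a restart.
                      :  */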
    1419             : static int
    1420           0 : sort_chunks(struct ftl_nv_cache *nv_cache)
    1421             : {
    1422             :         struct ftl_nv_cache_chunk **chunks_list;
    1423             :         struct ftl_nv_cache_chunk *chunk;
    1424             :         uint32_t i;
    1425             : 
    1426           0 :         if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
    1427           0 :                 return 0;
    1428             :         }
    1429             : 
    1430           0 :         chunks_list = calloc(nv_cache->chunk_full_count,
    1431             :                              sizeof(chunks_list[0]));
    1432           0 :         if (!chunks_list) {
    1433           0 :                 return -ENOMEM;
    1434             :         }
    1435             : 
    1436           0 :         i = 0;
    1437           0 :         TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
    1438           0 :                 chunks_list[i] = chunk;
    1439           0 :                 i++;
    1440             :         }
    1441           0 :         assert(i == nv_cache->chunk_full_count);
    1442             : 
    1443           0 :         qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
    1444             :               sort_chunks_cmp);
    1445             : 
    1446           0 :         TAILQ_INIT(&nv_cache->chunk_full_list);
    1447           0 :         for (i = 0; i < nv_cache->chunk_full_count; i++) {
    1448           0 :                 chunk = chunks_list[i];
    1449           0 :                 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
    1450             :         }
    1451             : 
    1452           0 :         free(chunks_list);
    1453           0 :         return 0;
    1454             : }
    1455             : 
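                      : /*
                      :  * Take a P2L map buffer and a metadata entry for the chunk from their
                      :  * pools; the map is preset to all ones, i.e. every entry reads as
                      :  * FTL_LBA_INVALID.
                      :  */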
    1456             : static int
    1457           0 : chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
    1458             : {
    1459           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1460           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1461             : 
    1462           0 :         assert(p2l_map->ref_cnt == 0);
    1463           0 :         assert(p2l_map->chunk_map == NULL);
    1464             : 
    1465           0 :         p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
    1466             : 
    1467           0 :         if (!p2l_map->chunk_map) {
    1468           0 :                 return -ENOMEM;
    1469             :         }
    1470             : 
    1471           0 :         if (ftl_chunk_alloc_md_entry(chunk)) {
    1472           0 :                 ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
    1473           0 :                 p2l_map->chunk_map = NULL;
    1474           0 :                 return -ENOMEM;
    1475             :         }
    1476             : 
    1477             :         /* Set the P2L to FTL_LBA_INVALID */
    1478           0 :         memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
    1479             : 
    1480           0 :         return 0;
    1481             : }
    1482             : 
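                      : /*
                      :  * Rebuild the free and full chunk lists from the persisted metadata,
                      :  * verifying each chunk's offset and written-block count, then sort the
                      :  * full list by sequence id. Any inconsistency aborts the load with a
                      :  * negative status.
                      :  */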
    1483             : int
    1484           0 : ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
    1485             : {
    1486             :         struct ftl_nv_cache_chunk *chunk;
    1487             :         uint64_t chunks_number, offset, i;
    1488           0 :         int status = 0;
    1489           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1490             : 
    1491           0 :         nv_cache->chunk_current = NULL;
    1492           0 :         TAILQ_INIT(&nv_cache->chunk_free_list);
    1493           0 :         TAILQ_INIT(&nv_cache->chunk_full_list);
    1494           0 :         nv_cache->chunk_full_count = nv_cache->chunk_free_count = 0;
    1495             : 
    1496           0 :         assert(nv_cache->chunk_open_count == 0);
    1497           0 :         offset = nvc_data_offset(nv_cache);
    1498           0 :         chunk = nv_cache->chunks;
    1499           0 :         if (!chunk) {
    1500           0 :                 FTL_ERRLOG(dev, "No NV cache metadata\n");
    1501           0 :                 return -1;
    1502             :         }
    1503             : 
    1504           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1505           0 :                 chunk->nv_cache = nv_cache;
    1506           0 :                 nvc_validate_md(nv_cache, chunk->md);
    1507             : 
    1508           0 :                 if (offset != chunk->offset) {
    1509           0 :                         status = -EINVAL;
    1510           0 :                         goto error;
    1511             :                 }
    1512             : 
    1513           0 :                 if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
     1514             :                         /* Chunk is full, move it to the full list */
    1515           0 :                         TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
    1516           0 :                         nv_cache->chunk_full_count++;
    1517           0 :                 } else if (0 == chunk->md->blocks_written) {
     1518             :                         /* Chunk is empty, move it to the free list */
    1519           0 :                         TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
    1520           0 :                         nv_cache->chunk_free_count++;
    1521             :                 } else {
    1522           0 :                         status = -EINVAL;
    1523           0 :                         goto error;
    1524             :                 }
    1525             : 
    1526           0 :                 offset += nv_cache->chunk_blocks;
    1527             :         }
    1528             : 
    1529           0 :         chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count;
    1530           0 :         assert(nv_cache->chunk_current == NULL);
    1531             : 
    1532           0 :         if (chunks_number != nv_cache->chunk_count) {
    1533           0 :                 FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
    1534           0 :                 status = -EINVAL;
    1535           0 :                 goto error;
    1536             :         }
    1537             : 
    1538           0 :         status = sort_chunks(nv_cache);
    1539           0 :         if (status) {
    1540           0 :                 FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
    1541             :         }
    1542             : 
    1543           0 :         FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
    1544             :                       nv_cache->chunk_full_count, nv_cache->chunk_free_count);
    1545             : 
    1546           0 :         if (0 == status) {
    1547           0 :                 FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
    1548             :         } else {
    1549           0 :                 FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
    1550             :         }
    1551             : 
    1552           0 : error:
    1553           0 :         return status;
    1554             : }
    1555             : 
    1556             : void
    1557           0 : ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
    1558             :                             uint64_t *close_seq_id)
    1559             : {
    1560           0 :         uint64_t i, o_seq_id = 0, c_seq_id = 0;
    1561             :         struct ftl_nv_cache_chunk *chunk;
    1562             : 
    1563           0 :         chunk = nv_cache->chunks;
    1564           0 :         assert(chunk);
    1565             : 
    1566             :         /* Iterate over chunks and get their max open and close seq id */
    1567           0 :         for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
    1568           0 :                 o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
    1569           0 :                 c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
    1570             :         }
    1571             : 
    1572           0 :         *open_seq_id = o_seq_id;
    1573           0 :         *close_seq_id = c_seq_id;
    1574           0 : }
    1575             : 
    1576             : typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
    1577             : 
    1578             : static void
    1579           0 : write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
    1580             : {
    1581           0 :         struct ftl_basic_rq *brq = arg;
    1582           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    1583             : 
    1584           0 :         ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
    1585             : 
    1586           0 :         brq->success = success;
    1587           0 :         if (spdk_likely(success)) {
    1588           0 :                 chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
    1589             :         }
    1590             : 
    1591           0 :         spdk_bdev_free_io(bdev_io);
    1592           0 :         brq->owner.cb(brq);
    1593           0 : }
    1594             : 
    1595             : static void
    1596           0 : _ftl_chunk_basic_rq_write(void *_brq)
    1597             : {
    1598           0 :         struct ftl_basic_rq *brq = _brq;
    1599           0 :         struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
    1600           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1601             :         int rc;
    1602             : 
    1603           0 :         rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1604             :                         brq->io_payload, NULL, brq->io.addr,
    1605             :                         brq->num_blocks, write_brq_end, brq);
    1606           0 :         if (spdk_unlikely(rc)) {
    1607           0 :                 if (rc == -ENOMEM) {
    1608           0 :                         struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
    1609           0 :                         brq->io.bdev_io_wait.bdev = bdev;
    1610           0 :                         brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
    1611           0 :                         brq->io.bdev_io_wait.cb_arg = brq;
    1612           0 :                         spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
    1613             :                 } else {
    1614           0 :                         ftl_abort();
    1615             :                 }
    1616             :         }
    1617           0 : }
    1618             : 
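                      : /*
                      :  * Issue a basic write request to the chunk. The write pointer is
                      :  * advanced optimistically at submission; blocks_written only moves in
                      :  * the completion callback once the write actually succeeded.
                      :  */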
    1619             : static void
    1620           0 : ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
    1621             : {
    1622           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1623           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1624             : 
    1625           0 :         brq->io.chunk = chunk;
    1626           0 :         brq->success = false;
    1627             : 
    1628           0 :         _ftl_chunk_basic_rq_write(brq);
    1629             : 
    1630           0 :         chunk->md->write_pointer += brq->num_blocks;
    1631           0 :         dev->stats.io_activity_total += brq->num_blocks;
    1632           0 : }
    1633             : 
    1634             : static void
    1635           0 : read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
    1636             : {
    1637           0 :         struct ftl_basic_rq *brq = arg;
    1638             : 
    1639           0 :         ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
    1640             : 
    1641           0 :         brq->success = success;
    1642             : 
    1643           0 :         brq->owner.cb(brq);
    1644           0 :         spdk_bdev_free_io(bdev_io);
    1645           0 : }
    1646             : 
    1647             : static int
    1648           0 : ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
    1649             : {
    1650           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1651           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1652             :         int rc;
    1653             : 
    1654           0 :         brq->io.chunk = chunk;
    1655           0 :         brq->success = false;
    1656             : 
    1657           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
    1658             :                         brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
    1659             : 
    1660           0 :         if (spdk_likely(!rc)) {
    1661           0 :                 dev->stats.io_activity_total += brq->num_blocks;
    1662             :         }
    1663             : 
    1664           0 :         return rc;
    1665             : }
    1666             : 
    1667             : static void
    1668           0 : chunk_open_cb(int status, void *ctx)
    1669             : {
    1670           0 :         struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
    1671             : 
    1672           0 :         if (spdk_unlikely(status)) {
    1673             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1674             :                 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
    1675             :                 return;
    1676             : #else
    1677           0 :                 ftl_abort();
    1678             : #endif
    1679             :         }
    1680             : 
    1681           0 :         chunk->md->state = FTL_CHUNK_STATE_OPEN;
    1682           0 : }
    1683             : 
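                      : /*
                      :  * Transition a chunk to open: allocate its P2L map, then persist the
                      :  * OPEN state via the DMA-able metadata copy. The in-core state flips
                      :  * to FTL_CHUNK_STATE_OPEN only in the persist callback.
                      :  */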
    1684             : static void
    1685           0 : ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
    1686             : {
    1687           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1688           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1689           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    1690           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    1691             : 
    1692           0 :         if (chunk_alloc_p2l_map(chunk)) {
    1693           0 :                 assert(0);
    1694             :                 /*
     1695             :                  * We control the number of open chunks, which must stay consistent
     1696             :                  * with the size of the chunk P2L map pool
    1697             :                  */
    1698             :                 ftl_abort();
    1699             :                 return;
    1700             :         }
    1701             : 
    1702           0 :         chunk->nv_cache->chunk_open_count++;
    1703             : 
    1704           0 :         assert(chunk->md->write_pointer == 0);
    1705           0 :         assert(chunk->md->blocks_written == 0);
    1706             : 
    1707           0 :         memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    1708           0 :         p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
    1709           0 :         p2l_map->chunk_dma_md->p2l_map_checksum = 0;
    1710             : 
    1711           0 :         ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md,
    1712             :                              NULL, chunk_open_cb, chunk,
    1713             :                              &chunk->md_persist_entry_ctx);
    1714             : }
    1715             : 
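                      : /*
                      :  * Persist completion for a closing chunk: on success the P2L checksum
                      :  * is recorded, the P2L map released, and the chunk moves from the
                      :  * open count to the full list before being marked CLOSED.
                      :  */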
    1716             : static void
    1717           0 : chunk_close_cb(int status, void *ctx)
    1718             : {
    1719           0 :         struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
    1720             : 
    1721           0 :         assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
    1722             : 
    1723           0 :         if (spdk_likely(!status)) {
    1724           0 :                 chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
    1725           0 :                 chunk_free_p2l_map(chunk);
    1726             : 
    1727           0 :                 assert(chunk->nv_cache->chunk_open_count > 0);
    1728           0 :                 chunk->nv_cache->chunk_open_count--;
    1729             : 
     1730             :                 /* Chunk is full, move it to the full list */
    1731           0 :                 TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
    1732           0 :                 chunk->nv_cache->chunk_full_count++;
    1733             : 
    1734           0 :                 chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
    1735             : 
    1736           0 :                 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
    1737             :         } else {
    1738             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1739             :                 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
    1740             : #else
    1741           0 :                 ftl_abort();
    1742             : #endif
    1743             :         }
    1744           0 : }
    1745             : 
    1746             : static void
    1747           0 : chunk_map_write_cb(struct ftl_basic_rq *brq)
    1748             : {
    1749           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    1750           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1751           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1752           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    1753           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    1754             :         uint32_t chunk_map_crc;
    1755             : 
    1756           0 :         if (spdk_likely(brq->success)) {
    1757           0 :                 chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
    1758           0 :                                                    chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
    1759           0 :                 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    1760           0 :                 p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
    1761           0 :                 p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
    1762           0 :                 ftl_md_persist_entry(md, get_chunk_idx(chunk), chunk->p2l_map.chunk_dma_md,
    1763             :                                      NULL, chunk_close_cb, chunk,
    1764             :                                      &chunk->md_persist_entry_ctx);
    1765             :         } else {
    1766             : #ifdef SPDK_FTL_RETRY_ON_ERROR
    1767             :                 /* retry */
    1768             :                 chunk->md->write_pointer -= brq->num_blocks;
    1769             :                 ftl_chunk_basic_rq_write(chunk, brq);
    1770             : #else
    1771           0 :                 ftl_abort();
    1772             : #endif
    1773             :         }
    1774           0 : }
    1775             : 
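                      : /*
                      :  * Close a full chunk: stamp its close sequence id and write the tail
                      :  * metadata (the P2L map) at the end of the chunk; chunk_map_write_cb
                      :  * then persists the CLOSED state.
                      :  */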
    1776             : static void
    1777           0 : ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
    1778             : {
    1779           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1780           0 :         struct ftl_basic_rq *brq = &chunk->metadata_rq;
    1781           0 :         void *metadata = chunk->p2l_map.chunk_map;
    1782             : 
    1783           0 :         chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
    1784           0 :         ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    1785           0 :         ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
    1786             : 
    1787           0 :         assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
    1788           0 :         brq->io.addr = chunk->offset + chunk->md->write_pointer;
    1789             : 
    1790           0 :         ftl_chunk_basic_rq_write(chunk, brq);
    1791           0 : }
    1792             : 
    1793             : static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
    1794             :                                   void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
    1795             : static void read_tail_md_cb(struct ftl_basic_rq *brq);
    1796             : static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
    1797             : 
    1798             : static void
    1799           0 : restore_chunk_close_cb(int status, void *ctx)
    1800             : {
    1801           0 :         struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
    1802           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    1803           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1804             : 
    1805           0 :         if (spdk_unlikely(status)) {
    1806           0 :                 parent->success = false;
    1807             :         } else {
    1808           0 :                 chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
    1809           0 :                 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
    1810             :         }
    1811             : 
    1812           0 :         read_tail_md_cb(parent);
    1813           0 : }
    1814             : 
    1815             : static void
    1816           0 : restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
    1817             : {
    1818           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    1819           0 :         struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
    1820           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1821           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    1822           0 :         struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
    1823             :         uint32_t chunk_map_crc;
    1824             : 
    1825             :         /* Set original callback */
    1826           0 :         ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
    1827             : 
    1828           0 :         if (spdk_unlikely(!parent->success)) {
    1829           0 :                 read_tail_md_cb(parent);
    1830           0 :                 return;
    1831             :         }
    1832             : 
    1833           0 :         chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
    1834           0 :                                            chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
    1835           0 :         memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
    1836           0 :         p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
    1837           0 :         p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
    1838           0 :         p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
    1839           0 :         p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
    1840             : 
    1841           0 :         ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
    1842             :                              restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
    1843             : }
    1844             : 
    1845             : static void
    1846           0 : restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
    1847             : {
    1848           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1849             :         void *metadata;
    1850             : 
    1851           0 :         chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
    1852             : 
    1853           0 :         metadata = chunk->p2l_map.chunk_map;
    1854           0 :         ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    1855           0 :         ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
    1856             : 
    1857           0 :         parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
    1858           0 :         parent->io.chunk = chunk;
    1859             : 
    1860           0 :         ftl_chunk_basic_rq_write(chunk, parent);
    1861           0 : }
    1862             : 
    1863             : static void
    1864           0 : read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    1865             : {
    1866           0 :         struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
    1867           0 :         struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
    1868           0 :         struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
    1869           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1870           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1871             :         union ftl_md_vss *md;
    1872           0 :         uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
    1873           0 :         uint64_t len = bdev_io->u.bdev.num_blocks;
    1874           0 :         ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
    1875             :         int rc;
    1876             : 
    1877           0 :         ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
    1878             : 
    1879           0 :         spdk_bdev_free_io(bdev_io);
    1880             : 
    1881           0 :         if (!success) {
    1882           0 :                 parent->success = false;
    1883           0 :                 read_tail_md_cb(parent);
    1884           0 :                 return;
    1885             :         }
    1886             : 
    1887           0 :         while (rq->iter.idx < rq->iter.count) {
    1888             :                 /* Get metadata */
    1889           0 :                 md = rq->entries[rq->iter.idx].io_md;
    1890           0 :                 if (md->nv_cache.seq_id != chunk->md->seq_id) {
    1891           0 :                         md->nv_cache.lba = FTL_LBA_INVALID;
    1892             :                 }
    1893             :                 /*
     1894             :                  * The P2L map effectively holds random data at this point (arbitrary blocks
     1895             :                  * from a possibly unfilled tail MD), so even FTL_LBA_INVALID must be set explicitly
    1896             :                  */
    1897             : 
     1898           0 :                 ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
    1899           0 :                 rq->iter.idx++;
    1900             :         }
    1901             : 
    1902           0 :         if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
    1903           0 :                 cache_offset += len;
    1904           0 :                 len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
    1905           0 :                 rq->iter.idx = 0;
    1906           0 :                 rq->iter.count = len;
    1907             : 
    1908           0 :                 rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
    1909             :                                 nv_cache->cache_ioch,
    1910             :                                 rq->io_payload,
    1911             :                                 rq->io_md,
    1912             :                                 cache_offset, len,
    1913             :                                 read_open_chunk_cb,
    1914             :                                 rq);
    1915             : 
    1916           0 :                 if (rc) {
    1917           0 :                         ftl_rq_del(rq);
    1918           0 :                         parent->success = false;
    1919           0 :                         read_tail_md_cb(parent);
    1920           0 :                         return;
    1921             :                 }
    1922             :         } else {
    1923           0 :                 ftl_rq_del(rq);
    1924           0 :                 restore_fill_tail_md(parent, chunk);
    1925             :         }
    1926             : }
    1927             : 
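                      : /*
                      :  * Recovery of a chunk left open across a dirty shutdown: re-read the
                      :  * user data region in xfer_size batches, rebuild the P2L map from the
                      :  * per-block VSS metadata (stale seq_ids become FTL_LBA_INVALID), then
                      :  * rewrite the tail metadata and persist the chunk as closed.
                      :  */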
    1928             : static void
    1929           0 : restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
    1930             : {
    1931           0 :         struct ftl_nv_cache *nv_cache = chunk->nv_cache;
    1932           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
    1933             :         struct ftl_rq *rq;
    1934             :         uint64_t addr;
    1935           0 :         uint64_t len = dev->xfer_size;
    1936             :         int rc;
    1937             : 
    1938             :         /*
     1939             :          * The P2L map was just read from disk; prefill it with FTL_LBA_INVALID.
     1940             :          * TODO: needed because tail MD blocks (the P2L map itself) are also represented in the P2L map, not just the user data region
    1941             :          */
    1942           0 :         memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
    1943             : 
    1944             :         /* Need to read user data, recalculate chunk's P2L and write tail md with it */
    1945           0 :         rq = ftl_rq_new(dev, dev->nv_cache.md_size);
    1946           0 :         if (!rq) {
    1947           0 :                 parent->success = false;
    1948           0 :                 read_tail_md_cb(parent);
    1949           0 :                 return;
    1950             :         }
    1951             : 
    1952           0 :         rq->owner.priv = parent;
    1953           0 :         rq->iter.idx = 0;
    1954           0 :         rq->iter.count = len;
    1955             : 
    1956           0 :         addr = chunk->offset;
    1957             : 
    1958           0 :         len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
    1959             : 
    1960           0 :         rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
    1961             :                         nv_cache->cache_ioch,
    1962             :                         rq->io_payload,
    1963             :                         rq->io_md,
    1964             :                         addr, len,
    1965             :                         read_open_chunk_cb,
    1966             :                         rq);
    1967             : 
    1968           0 :         if (rc) {
    1969           0 :                 ftl_rq_del(rq);
    1970           0 :                 parent->success = false;
    1971           0 :                 read_tail_md_cb(parent);
    1972             :         }
    1973             : }
    1974             : 
    1975             : static void
    1976           0 : read_tail_md_cb(struct ftl_basic_rq *brq)
    1977             : {
    1978           0 :         brq->owner.cb(brq);
    1979           0 : }
    1980             : 
    1981             : static int
    1982           0 : ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
    1983             :                        void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
    1984             : {
    1985           0 :         struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
    1986             :         void *metadata;
    1987             :         int rc;
    1988             : 
    1989           0 :         metadata = chunk->p2l_map.chunk_map;
    1990           0 :         ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
    1991           0 :         ftl_basic_rq_set_owner(brq, cb, cb_ctx);
    1992             : 
    1993           0 :         brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
    1994           0 :         rc = ftl_chunk_basic_rq_read(chunk, brq);
    1995             : 
    1996           0 :         return rc;
    1997             : }
    1998             : 
    1999             : struct restore_chunk_md_ctx {
    2000             :         ftl_chunk_md_cb cb;
    2001             :         void *cb_ctx;
    2002             :         int status;
    2003             :         uint64_t qd;
    2004             :         uint64_t id;
    2005             : };
    2006             : 
    2007             : static inline bool
    2008           0 : is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
    2009             : {
    2010           0 :         uint64_t chunk_count = 0;
    2011             : 
    2012           0 :         chunk_count += nv_cache->chunk_open_count;
    2013           0 :         chunk_count += nv_cache->chunk_free_count;
    2014           0 :         chunk_count += nv_cache->chunk_full_count;
    2015           0 :         chunk_count += nv_cache->chunk_comp_count;
    2016             : 
    2017           0 :         return chunk_count == nv_cache->chunk_count;
    2018             : }
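
/*
 * Editorial sketch (hypothetical mock, not driver code): every chunk is in
 * exactly one of the four accounting states, so any list transition must
 * decrement one counter and increment another, preserving the sum that
 * is_chunk_count_valid() checks.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct mock_nvc {
        uint64_t chunk_open_count, chunk_free_count,
                 chunk_full_count, chunk_comp_count, chunk_count;
};

static bool
counts_valid(const struct mock_nvc *nvc)
{
        return nvc->chunk_open_count + nvc->chunk_free_count +
               nvc->chunk_full_count + nvc->chunk_comp_count == nvc->chunk_count;
}

int
main(void)
{
        struct mock_nvc nvc = { .chunk_free_count = 10, .chunk_count = 10 };

        /* Opening a chunk: FREE -> OPEN; the sum is unchanged. */
        nvc.chunk_free_count--;
        nvc.chunk_open_count++;
        assert(counts_valid(&nvc));
        return 0;
}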
    2019             : 
    2020             : static void
    2021           0 : walk_tail_md_cb(struct ftl_basic_rq *brq)
    2022             : {
    2023           0 :         struct ftl_mngt_process *mngt = brq->owner.priv;
    2024           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    2025           0 :         struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
    2026           0 :         int rc = 0;
    2027             : 
    2028           0 :         if (brq->success) {
    2029           0 :                 rc = ctx->cb(chunk, ctx->cb_ctx);
    2030             :         } else {
    2031           0 :                 rc = -EIO;
    2032             :         }
    2033             : 
    2034           0 :         if (rc) {
    2035           0 :                 ctx->status = rc;
    2036             :         }
    2037           0 :         ctx->qd--;
    2038           0 :         chunk_free_p2l_map(chunk);
    2039           0 :         ftl_mngt_continue_step(mngt);
    2040           0 : }
    2041             : 
    2042             : static void
    2043           0 : ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
    2044             :                                uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
    2045             : {
    2046           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2047             :         struct restore_chunk_md_ctx *ctx;
    2048             : 
    2049           0 :         ctx = ftl_mngt_get_step_ctx(mngt);
    2050           0 :         if (!ctx) {
    2051           0 :                 if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
    2052           0 :                         ftl_mngt_fail_step(mngt);
    2053           0 :                         return;
    2054             :                 }
    2055           0 :                 ctx = ftl_mngt_get_step_ctx(mngt);
    2056           0 :                 assert(ctx);
    2057             : 
    2058           0 :                 ctx->cb = cb;
    2059           0 :                 ctx->cb_ctx = cb_ctx;
    2060             :         }
    2061             : 
    2062             :         /*
    2063             :          * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
    2064             :          * are processed before returning an error (if any were found) or continuing on.
    2065             :          */
    2066           0 :         if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
    2067           0 :                 if (!is_chunk_count_valid(nvc)) {
    2068           0 :                         FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
    2069           0 :                         assert(false);
    2070             :                         ctx->status = -EINVAL;
    2071             :                 }
    2072             : 
    2073           0 :                 if (ctx->status) {
    2074           0 :                         ftl_mngt_fail_step(mngt);
    2075             :                 } else {
    2076           0 :                         ftl_mngt_next_step(mngt);
    2077             :                 }
    2078           0 :                 return;
    2079             :         }
    2080             : 
    2081           0 :         while (ctx->id < nvc->chunk_count) {
    2082           0 :                 struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
    2083             :                 int rc;
    2084             : 
    2085           0 :                 if (!chunk->recovery) {
    2086             :                         /* This chunk is empty and not used in recovery */
    2087           0 :                         ctx->id++;
    2088           0 :                         continue;
    2089             :                 }
    2090             : 
    2091           0 :                 if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
    2092           0 :                         ctx->id++;
    2093           0 :                         continue;
    2094             :                 }
    2095             : 
    2096           0 :                 if (chunk_alloc_p2l_map(chunk)) {
    2097             :                         /* No free P2L maps left; break and continue later */
    2098           0 :                         break;
    2099             :                 }
    2100           0 :                 ctx->id++;
    2101             : 
    2102           0 :                 rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
    2103             : 
    2104           0 :                 if (0 == rc) {
    2105           0 :                         ctx->qd++;
    2106             :                 } else {
    2107           0 :                         chunk_free_p2l_map(chunk);
    2108           0 :                         ctx->status = rc;
    2109             :                 }
    2110             :         }
    2111             : 
    2112           0 :         if (0 == ctx->qd) {
    2113             :                 /*
    2114             :          * Zero QD can happen when all leftover chunks are in the free state.
    2115             :          * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
    2116             :          * To streamline all potential error handling (since many chunks read their P2L at the same time),
    2117             :          * we use ftl_mngt_continue_step to arrive at the same mngt step-end check (see the beginning of the function).
    2118             :                  */
    2119           0 :                 ftl_mngt_continue_step(mngt);
    2120             :         }
    2121             : 
    2122             : }
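
/*
 * Editorial sketch: a self-contained simulation of the resumable-step idiom
 * used by ftl_mngt_nv_cache_walk_tail_md(). All names here are hypothetical;
 * completions are simulated synchronously, whereas the driver re-enters the
 * step asynchronously via ftl_mngt_continue_step().
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOTAL 5

struct walk_ctx {
        uint64_t qd;   /* operations in flight */
        uint64_t id;   /* next item to issue */
        int done;
};

static void step(struct walk_ctx *ctx);

/* Completion path: drop the QD and re-enter the step, the way
 * walk_tail_md_cb() decrements ctx->qd and calls ftl_mngt_continue_step(). */
static void
io_done(struct walk_ctx *ctx)
{
        ctx->qd--;
        step(ctx);
}

static void
step(struct walk_ctx *ctx)
{
        /* End condition: every item issued and all completions drained. */
        if (ctx->qd == 0 && ctx->id == TOTAL) {
                ctx->done = 1; /* stands in for ftl_mngt_next_step() */
                return;
        }

        /* Issue phase; a resource shortage would 'break' out early here,
         * leaving the in-flight completions to re-enter the step later. */
        while (ctx->id < TOTAL) {
                printf("issue item %" PRIu64 "\n", ctx->id);
                ctx->id++;
                ctx->qd++;
                io_done(ctx); /* simulated synchronous completion */
        }
}

int
main(void)
{
        struct walk_ctx ctx = { 0 };

        step(&ctx);
        assert(ctx.done);
        return 0;
}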
    2123             : 
    2124             : void
    2125           0 : ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
    2126             :                               ftl_chunk_md_cb cb, void *cb_ctx)
    2127             : {
    2128           0 :         ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
    2129           0 : }
    2130             : 
    2131             : static void
    2132           0 : restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
    2133             : {
    2134           0 :         struct ftl_mngt_process *mngt = md->owner.cb_ctx;
    2135           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2136             :         struct ftl_nv_cache_chunk *chunk;
    2137             :         uint64_t i;
    2138             : 
    2139           0 :         if (status) {
    2140             :                 /* Restore error, end step */
    2141           0 :                 ftl_mngt_fail_step(mngt);
    2142           0 :                 return;
    2143             :         }
    2144             : 
    2145           0 :         for (i = 0; i < nvc->chunk_count; i++) {
    2146           0 :                 chunk = &nvc->chunks[i];
    2147             : 
    2148           0 :                 switch (chunk->md->state) {
    2149           0 :                 case FTL_CHUNK_STATE_FREE:
    2150           0 :                         break;
    2151           0 :                 case FTL_CHUNK_STATE_OPEN:
    2152           0 :                         TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
    2153           0 :                         nvc->chunk_free_count--;
    2154             : 
    2155           0 :                         TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
    2156           0 :                         nvc->chunk_open_count++;
    2157             : 
    2158             :                         /* Chunk is not empty, mark it to be recovered */
    2159           0 :                         chunk->recovery = true;
    2160           0 :                         break;
    2161           0 :                 case FTL_CHUNK_STATE_CLOSED:
    2162           0 :                         TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
    2163           0 :                         nvc->chunk_free_count--;
    2164             : 
    2165           0 :                         TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
    2166           0 :                         nvc->chunk_full_count++;
    2167             : 
    2168             :                         /* Chunk is not empty, mark it to be recovered */
    2169           0 :                         chunk->recovery = true;
    2170           0 :                         break;
    2171           0 :                 default:
    2172           0 :                         status = -EINVAL;
    2173             :                 }
    2174             :         }
    2175             : 
    2176           0 :         if (status) {
    2177           0 :                 ftl_mngt_fail_step(mngt);
    2178             :         } else {
    2179           0 :                 ftl_mngt_next_step(mngt);
    2180             :         }
    2181             : }
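
/*
 * Editorial summary of the redistribution above (assumption, consistent with
 * the TAILQ_REMOVE from chunk_free_list in both branches: every chunk sits on
 * chunk_free_list when ftl_md_restore() completes):
 *
 *   persisted state   list after restore   recovery flag
 *   ---------------   ------------------   -------------
 *   FREE              chunk_free_list      unchanged
 *   OPEN              chunk_open_list      set
 *   CLOSED            chunk_full_list      set
 *   anything else     n/a                  step fails (-EINVAL)
 */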
    2182             : 
    2183             : void
    2184           0 : ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
    2185             : {
    2186           0 :         struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
    2187             : 
    2188           0 :         md->owner.cb_ctx = mngt;
    2189           0 :         md->cb = restore_chunk_state_cb;
    2190           0 :         ftl_md_restore(md);
    2191           0 : }
    2192             : 
    2193             : static void
    2194           0 : recover_open_chunk_cb(struct ftl_basic_rq *brq)
    2195             : {
    2196           0 :         struct ftl_mngt_process *mngt = brq->owner.priv;
    2197           0 :         struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
    2198           0 :         struct ftl_nv_cache *nvc = chunk->nv_cache;
    2199           0 :         struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
    2200             : 
    2201           0 :         chunk_free_p2l_map(chunk);
    2202             : 
    2203           0 :         if (!brq->success) {
    2204           0 :                 FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
    2205             :                            chunk->md->seq_id);
    2206           0 :                 ftl_mngt_fail_step(mngt);
    2207           0 :                 return;
    2208             :         }
    2209             : 
    2210           0 :         FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
    2211             :                       chunk->md->seq_id);
    2212             : 
    2213           0 :         TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
    2214           0 :         nvc->chunk_open_count--;
    2215             : 
    2216           0 :         TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
    2217           0 :         nvc->chunk_full_count++;
    2218             : 
    2219             :         /* This is now a closed chunk */
    2220           0 :         chunk->md->write_pointer = nvc->chunk_blocks;
    2221           0 :         chunk->md->blocks_written = nvc->chunk_blocks;
    2222             : 
    2223           0 :         ftl_mngt_continue_step(mngt);
    2224             : }
    2225             : 
    2226             : void
    2227           0 : ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
    2228             : {
    2229           0 :         struct ftl_nv_cache *nvc = &dev->nv_cache;
    2230             :         struct ftl_nv_cache_chunk *chunk;
    2231           0 :         struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
    2232             : 
    2233           0 :         if (!brq) {
    2234           0 :                 if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
    2235           0 :                         FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
    2236           0 :                         ftl_mngt_next_step(mngt);
    2237           0 :                         return;
    2238             :                 }
    2239             : 
    2240           0 :                 if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
    2241           0 :                         ftl_mngt_fail_step(mngt);
    2242           0 :                         return;
    2243             :                 }
    2244           0 :                 brq = ftl_mngt_get_step_ctx(mngt);
    2245           0 :                 ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
    2246             :         }
    2247             : 
    2248           0 :         if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
    2249           0 :                 if (!is_chunk_count_valid(nvc)) {
    2250           0 :                         FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
    2251           0 :                         ftl_mngt_fail_step(mngt);
    2252           0 :                         return;
    2253             :                 }
    2254             : 
    2255             :                 /*
    2256             :          * Now that all chunks are loaded and closed, do the final step of
    2257             :          * restoring the chunk state
    2258             :                  */
    2259           0 :                 if (ftl_nv_cache_load_state(nvc)) {
    2260           0 :                         ftl_mngt_fail_step(mngt);
    2261             :                 } else {
    2262           0 :                         ftl_mngt_next_step(mngt);
    2263             :                 }
    2264             :         } else {
    2265           0 :                 chunk = TAILQ_FIRST(&nvc->chunk_open_list);
    2266           0 :                 if (chunk_alloc_p2l_map(chunk)) {
    2267           0 :                         ftl_mngt_fail_step(mngt);
    2268           0 :                         return;
    2269             :                 }
    2270             : 
    2271           0 :                 brq->io.chunk = chunk;
    2272             : 
    2273           0 :                 FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
    2274             :                               chunk->offset, chunk->md->seq_id);
    2275           0 :                 restore_open_chunk(chunk, brq);
    2276             :         }
    2277             : }
    2278             : 
    2279             : int
    2280           0 : ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
    2281             : {
    2282             :         /* chunk_current migrates to the closed state while closing; any others should already have been
    2283             :          * moved to the free chunk list. We also need to wait for outstanding free MD requests. */
    2284           0 :         return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
    2285             : }
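
/*
 * Editorial sketch (hypothetical caller wiring, not SPDK code): despite its
 * name, ftl_nv_cache_chunks_busy() returns nonzero once nothing is left to
 * wait for. A shutdown path might poll it like this, re-running the halt
 * logic until the cache quiesces:
 */
static int
halt_poller_sketch(struct ftl_nv_cache *nv_cache)
{
        ftl_nv_cache_halt(nv_cache); /* frees more resources on each pass */

        if (ftl_nv_cache_chunks_busy(nv_cache)) {
                return 1; /* quiesced: no open chunks, no free-MD persists */
        }
        return 0; /* still draining; poll again */
}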
    2286             : 
    2287             : void
    2288           0 : ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
    2289             : {
    2290             :         struct ftl_nv_cache_chunk *chunk;
    2291             :         uint64_t free_space;
    2292             : 
    2293           0 :         nv_cache->halt = true;
    2294             : 
    2295             :         /* Set chunks on the open list back to the free state, since no user data has been written to them */
    2296           0 :         while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
    2297           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
    2298             : 
    2299             :         /* Chunks are moved between lists on metadata update submission, but their state only changes
    2300             :          * on completion. We break early in such a case to make sure all the necessary resources
    2301             :          * will be freed (during the next pass(es) of ftl_nv_cache_halt).
    2302             :                  */
    2303           0 :                 if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
    2304           0 :                         break;
    2305             :                 }
    2306             : 
    2307           0 :                 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
    2308           0 :                 chunk_free_p2l_map(chunk);
    2309           0 :                 memset(chunk->md, 0, sizeof(*chunk->md));
    2310           0 :                 assert(nv_cache->chunk_open_count > 0);
    2311           0 :                 nv_cache->chunk_open_count--;
    2312             :         }
    2313             : 
    2314             :         /* Close the current chunk by skipping all unwritten blocks */
    2315           0 :         chunk = nv_cache->chunk_current;
    2316           0 :         if (chunk != NULL) {
    2317           0 :                 nv_cache->chunk_current = NULL;
    2318           0 :                 if (chunk_is_closed(chunk)) {
    2319           0 :                         return;
    2320             :                 }
    2321             : 
    2322           0 :                 free_space = chunk_get_free_space(nv_cache, chunk);
    2323           0 :                 chunk->md->blocks_skipped = free_space;
    2324           0 :                 chunk->md->blocks_written += free_space;
    2325           0 :                 chunk->md->write_pointer += free_space;
    2326           0 :                 ftl_chunk_close(chunk);
    2327             :         }
    2328             : }
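
/*
 * Editorial worked example (assumed numbers, not driver code): halting with a
 * partially written current chunk books all remaining user-data blocks as
 * skipped, so the write pointer lands exactly on the tail-MD boundary and the
 * chunk becomes closeable.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint64_t data_blocks = 1016;   /* chunk_tail_md_offset(): user-data blocks per chunk */
        uint64_t blocks_written = 600; /* md->blocks_written before halt */
        uint64_t free_space = data_blocks - blocks_written; /* chunk_get_free_space(): 416 */

        /* What ftl_nv_cache_halt() does before calling ftl_chunk_close(): */
        uint64_t blocks_skipped = free_space;
        blocks_written += free_space;

        assert(blocks_skipped == 416);
        assert(blocks_written == data_blocks); /* write pointer is now at the tail MD */
        return 0;
}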
    2329             : 
    2330             : uint64_t
    2331           0 : ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
    2332             : {
    2333           0 :         struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
    2334             :         uint64_t seq_id, free_space;
    2335             : 
    2336           0 :         if (!chunk) {
    2337           0 :                 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
    2338           0 :                 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
    2339           0 :                         return chunk->md->seq_id;
    2340             :                 } else {
    2341           0 :                         return 0;
    2342             :                 }
    2343             :         }
    2344             : 
    2345           0 :         if (chunk_is_closed(chunk)) {
    2346           0 :                 return 0;
    2347             :         }
    2348             : 
    2349           0 :         seq_id = nv_cache->chunk_current->md->seq_id;
    2350           0 :         free_space = chunk_get_free_space(nv_cache, chunk);
    2351             : 
    2352           0 :         chunk->md->blocks_skipped = free_space;
    2353           0 :         chunk->md->blocks_written += free_space;
    2354           0 :         chunk->md->write_pointer += free_space;
    2355           0 :         if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
    2356           0 :                 ftl_chunk_close(chunk);
    2357             :         }
    2358           0 :         nv_cache->chunk_current = NULL;
    2359             : 
    2360           0 :         seq_id++;
    2361           0 :         return seq_id;
    2362             : }
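
/*
 * Editorial note (assumed semantics, inferred from the code above): the trim
 * operation receives a sequence id strictly greater than that of the chunk
 * being force-closed, so that on recovery the trim is ordered after all user
 * data written to that chunk.
 */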
    2363             : 
    2364             : static double
    2365           0 : ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
    2366             :                                    struct ftl_nv_cache_chunk *chunk)
    2367             : {
    2368           0 :         double capacity = nv_cache->chunk_blocks;
    2369           0 :         double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
    2370             : 
    2371           0 :         return used / capacity;
    2372             : }
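
/*
 * Editorial worked example (assumed numbers): skipped blocks count as "used"
 * because they consume chunk capacity even though they carry no user data.
 */
#include <assert.h>

int
main(void)
{
        double capacity = 1024.0;    /* nv_cache->chunk_blocks */
        double used = 512.0 + 256.0; /* blocks_written + blocks_skipped */

        assert(used / capacity == 0.75); /* exact in binary floating point */
        return 0;
}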
    2373             : 
    2374             : static const char *
    2375           0 : ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
    2376             : {
    2377             :         static const char *names[] = {
    2378             :                 "FREE", "OPEN", "CLOSED",
    2379             :         };
    2380             : 
    2381           0 :         assert(chunk->md->state < SPDK_COUNTOF(names));
    2382           0 :         if (chunk->md->state < SPDK_COUNTOF(names)) {
    2383           0 :                 return names[chunk->md->state];
    2384             :         } else {
    2385           0 :                 assert(false);
    2386             :                 return "?";
    2387             :         }
    2388             : }
    2389             : 
    2390             : static void
    2391           0 : ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
    2392             :                             struct spdk_json_write_ctx *w)
    2393             : {
    2394             :         uint64_t i;
    2395             :         struct ftl_nv_cache_chunk *chunk;
    2396             : 
    2397           0 :         spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_desc->name);
    2398           0 :         spdk_json_write_named_array_begin(w, "chunks");
    2399           0 :         for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
    2400           0 :                 spdk_json_write_object_begin(w);
    2401           0 :                 spdk_json_write_named_uint64(w, "id", i);
    2402           0 :                 spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
    2403           0 :                 spdk_json_write_named_double(w, "utilization",
    2404             :                                              ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
    2405           0 :                 spdk_json_write_object_end(w);
    2406             :         }
    2407           0 :         spdk_json_write_array_end(w);
    2408           0 : }
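
/*
 * Editorial illustration (made-up values; exact formatting depends on the
 * spdk_json_write_* helpers): the property dump above emits a fragment like
 *
 *   "type": "example_nvc",
 *   "chunks": [
 *     { "id": 0, "state": "CLOSED", "utilization": 1.0 },
 *     { "id": 1, "state": "OPEN",   "utilization": 0.25 }
 *   ]
 */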

Generated by: LCOV version 1.15