/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright 2023 Solidigm All Rights Reserved
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_NV_CACHE_H
#define FTL_NV_CACHE_H

#include "spdk/stdinc.h"
#include "spdk/crc32.h"

#include "ftl_io.h"
#include "ftl_utils.h"
#include "ftl_internal.h"
#include "nvc/ftl_nvc_dev.h"

/*
 * The FTL non-volatile cache is divided into groups of blocks called chunks.
 * The size of each chunk is a multiple of the xfer size, plus additional metadata.
 * For each block, the associated lba is stored in the metadata. Cache space is
 * written sequentially, chunk by chunk. When the number of free chunks falls to a
 * certain threshold, the oldest chunks are moved from the cache to backend storage
 * to make room for new user data.
 */
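
/*
 * Illustrative layout note (an assumption for clarity, not something this header
 * defines): the per-block lbas of a chunk form its P2L map, which is assumed to be
 * persisted in a tail metadata region at the end of the chunk, i.e.
 *
 *     tail_md_offset = chunk_blocks - tail_md_chunk_blocks
 *
 * See chunk_tail_md_offset() and ftl_nv_cache_chunk_tail_md_num_blocks() below.
 */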

#define FTL_NVC_VERSION_0       0
#define FTL_NVC_VERSION_1       1
#define FTL_NVC_VERSION_2       2

#define FTL_NVC_VERSION_CURRENT FTL_NVC_VERSION_2

#define FTL_NV_CACHE_NUM_COMPACTORS 8

/*
 * Parameters controlling nv cache write throttling.
 *
 * The write throttle limit value is calculated as follows:
 * limit = compaction_average_bw * (1.0 + modifier)
 *
 * The modifier depends on the number of free chunks vs the configured threshold. Its value is
 * zero if the number of free chunks is at the threshold, negative if below and positive if above.
 */

/* Interval in milliseconds between write throttle updates. */
#define FTL_NV_CACHE_THROTTLE_INTERVAL_MS       20
/* Throttle modifier proportional gain */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_KP       20
/* Min and max modifier values */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MIN      -0.8
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MAX      0.5
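
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver): one way
 * the write throttle limit described above could be derived from these parameters,
 * assuming the compaction bandwidth average and the free chunk counts are tracked
 * elsewhere (see struct ftl_nv_cache below). The authoritative logic lives in
 * ftl_nv_cache.c.
 */
static inline double
ftl_nv_cache_example_throttle_limit(double compaction_avg_bw, uint64_t chunk_free_count,
                                    uint64_t chunk_free_target)
{
        /* Proportional modifier: zero at the free chunk target, negative below, positive above */
        double modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP *
                          ((double)chunk_free_count - (double)chunk_free_target) /
                          (double)chunk_free_target;

        /* Clamp the modifier to the configured range */
        if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
                modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
        } else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
                modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
        }

        /* limit = compaction_average_bw * (1.0 + modifier) */
        return compaction_avg_bw * (1.0 + modifier);
}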

struct ftl_nvcache_restore;
typedef void (*ftl_nv_cache_restore_fn)(struct ftl_nvcache_restore *, int, void *cb_arg);

enum ftl_chunk_state {
        FTL_CHUNK_STATE_FREE,
        FTL_CHUNK_STATE_OPEN,
        FTL_CHUNK_STATE_CLOSED,
        FTL_CHUNK_STATE_INACTIVE,
        FTL_CHUNK_STATE_MAX
};

struct ftl_nv_cache_chunk_md {
        /* Chunk metadata version */
        uint64_t version;

        /* Sequence id of writing */
        uint64_t seq_id;

        /* Sequence ID when chunk was closed */
        uint64_t close_seq_id;

        /* Current lba to write */
        uint32_t write_pointer;

        /* Number of blocks written */
        uint32_t blocks_written;

        /* Number of skipped blocks (when the IO size exceeds the blocks left in the chunk) */
        uint32_t blocks_skipped;

        /* Next block to be compacted */
        uint32_t read_pointer;

        /* Number of compacted (both valid and invalid) blocks */
        uint32_t blocks_compacted;

        /* Chunk state */
        enum ftl_chunk_state state;

        /* CRC32 checksum of the associated P2L map when chunk is in closed state */
        uint32_t p2l_map_checksum;

        /* P2L IO log type */
        enum ftl_layout_region_type p2l_log_type;

        /* Reserved */
        uint8_t reserved[4040];
} __attribute__((packed));

SPDK_STATIC_ASSERT(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE,
                   "FTL NV Chunk metadata size is invalid");
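
/*
 * Note (clarifying assumption): the reserved field pads the packed metadata so the
 * structure occupies exactly one FTL block, which the static assert above enforces.
 * Assuming 4-byte enums and a 4096-byte FTL_BLOCK_SIZE, the fields above sum to
 * 56 bytes and 56 + 4040 == 4096.
 */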

struct ftl_nv_cache_chunk {
        struct ftl_nv_cache *nv_cache;

        struct ftl_nv_cache_chunk_md *md;

        /* Offset from the start lba of the cache */
        uint64_t offset;

        /* P2L map */
        struct ftl_p2l_map p2l_map;

        /* Metadata request */
        struct ftl_basic_rq metadata_rq;

        TAILQ_ENTRY(ftl_nv_cache_chunk) entry;

        /* This flag indicates that the chunk is being used in recovery */
        bool recovery;

        /* Compaction start time */
        uint64_t compaction_start_tsc;

        /* Compaction duration */
        uint64_t compaction_length_tsc;

        /* For writing metadata */
        struct ftl_md_io_entry_ctx md_persist_entry_ctx;

        /* P2L Log for IOs */
        struct ftl_p2l_log *p2l_log;
};

struct ftl_nv_cache_compactor {
        struct ftl_nv_cache *nv_cache;
        struct ftl_rq *rq;
        TAILQ_ENTRY(ftl_nv_cache_compactor) entry;
        struct spdk_bdev_io_wait_entry bdev_io_wait;
};

struct ftl_nv_cache {
        /* Flag indicating halt request */
        bool halt;

        /* NV cache device type */
        const struct ftl_nv_cache_device_type *nvc_type;

        /* Write buffer cache bdev */
        struct spdk_bdev_desc *bdev_desc;

        /* Persistent cache IO channel */
        struct spdk_io_channel *cache_ioch;

        /* Metadata pool */
        struct ftl_mempool *md_pool;

        /* P2L map memory pool */
        struct ftl_mempool *p2l_pool;

        /* Chunk md memory pool */
        struct ftl_mempool *chunk_md_pool;

        /* Chunk md memory pool for freeing chunks */
        struct ftl_mempool *free_chunk_md_pool;

        /* Block Metadata size */
        uint64_t md_size;

        /* NV cache metadata object handle */
        struct ftl_md *md;

        /* Number of blocks in chunk */
        uint64_t chunk_blocks;

        /* Number of blocks in tail md per chunk */
        uint64_t tail_md_chunk_blocks;

        /* Number of chunks */
        uint64_t chunk_count;

        /* Currently processed chunk */
        struct ftl_nv_cache_chunk *chunk_current;

        /* Free chunks list */
        TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_free_list;
        uint64_t chunk_free_count;

        /* Open chunks list */
        TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_open_list;
        uint64_t chunk_open_count;

        /* Full chunks list */
        TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_full_list;
        uint64_t chunk_full_count;

        /* Chunks being compacted */
        TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_comp_list;
        uint64_t chunk_comp_count;

        /* Chunks being freed */
        TAILQ_HEAD(, ftl_nv_cache_chunk) needs_free_persist_list;
        uint64_t chunk_free_persist_count;

        /* Chunks which are inactive */
        TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_inactive_list;
        uint64_t chunk_inactive_count;

        TAILQ_HEAD(, ftl_nv_cache_compactor) compactor_list;
        uint64_t compaction_active_count;
        uint64_t chunk_compaction_threshold;

        struct ftl_nv_cache_chunk *chunks;

        uint64_t last_seq_id;

        uint64_t chunk_free_target;

        /* Simple moving average of recent compaction velocity values */
        double compaction_sma;

#define FTL_NV_CACHE_COMPACTION_SMA_N (FTL_NV_CACHE_NUM_COMPACTORS * 2)
        /* Circular buffer holding values for calculating compaction SMA;
         * an illustrative update sketch follows this struct */
        struct compaction_bw_stats {
                double buf[FTL_NV_CACHE_COMPACTION_SMA_N];
                ptrdiff_t first;
                size_t count;
                double sum;
        } compaction_recent_bw;

        struct {
                uint64_t interval_tsc;
                uint64_t start_tsc;
                uint64_t blocks_submitted;
                uint64_t blocks_submitted_limit;
        } throttle;
};
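
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver): how a new
 * compaction bandwidth sample could be folded into the circular buffer above to
 * maintain the simple moving average. The authoritative logic lives in ftl_nv_cache.c.
 */
static inline double
ftl_nv_cache_example_compaction_sma_add(struct ftl_nv_cache *nv_cache, double sample)
{
        struct compaction_bw_stats *bw = &nv_cache->compaction_recent_bw;

        if (bw->count == FTL_NV_CACHE_COMPACTION_SMA_N) {
                /* Window full: evict the oldest sample before adding the new one */
                bw->sum -= bw->buf[bw->first];
                bw->first = (bw->first + 1) % FTL_NV_CACHE_COMPACTION_SMA_N;
        } else {
                bw->count++;
        }

        /* Store the newest sample at the end of the circular window */
        bw->buf[(bw->first + bw->count - 1) % FTL_NV_CACHE_COMPACTION_SMA_N] = sample;
        bw->sum += sample;

        /* Simple moving average over the samples currently held */
        return bw->sum / bw->count;
}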

typedef void (*nvc_scrub_cb)(struct spdk_ftl_dev *dev, void *cb_ctx, int status);

void ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx);

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
bool ftl_nv_cache_write(struct ftl_io *io);
void ftl_nv_cache_write_complete(struct ftl_io *io, bool success);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
                      spdk_bdev_io_completion_cb cb, void *cb_arg);
bool ftl_nv_cache_throttle(struct spdk_ftl_dev *dev);
void ftl_nv_cache_process(struct spdk_ftl_dev *dev);

void ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
                           uint64_t offset, uint64_t lba);
uint64_t ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset);

void ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);

void ftl_nv_cache_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr);

int ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache);

static inline void
ftl_nv_cache_resume(struct ftl_nv_cache *nv_cache)
{
        nv_cache->halt = false;
}

bool ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache);

size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache);

uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);
/**
 * @brief Iterates over the NV cache chunks and returns the maximum open and closed sequence ids
 *
 * @param nv_cache FTL NV cache
 * @param[out] open_seq_id Maximum detected open sequence id
 * @param[out] close_seq_id Maximum detected close sequence id
 */
void ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
                                 uint64_t *close_seq_id);
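
/*
 * Illustrative usage sketch (hypothetical helper, not used by the driver): the
 * larger of the two reported sequence ids.
 */
static inline uint64_t
ftl_nv_cache_example_max_seq_id(struct ftl_nv_cache *nv_cache)
{
        uint64_t open_seq_id = 0, close_seq_id = 0;

        /* Query the maximum open and closed sequence ids seen across all chunks */
        ftl_nv_cache_get_max_seq_id(nv_cache, &open_seq_id, &close_seq_id);

        return open_seq_id > close_seq_id ? open_seq_id : close_seq_id;
}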

void ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

void ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

void ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
                                   ftl_chunk_md_cb cb, void *cb_ctx);

struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
                ftl_addr addr);

uint64_t ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md);

#endif  /* FTL_NV_CACHE_H */
