Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2022 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/stdinc.h"
7 : #include "spdk/cpuset.h"
8 : #include "spdk/queue.h"
9 : #include "spdk/thread.h"
10 : #include "spdk/event.h"
11 : #include "spdk/ftl.h"
12 : #include "spdk/conf.h"
13 : #include "spdk/env.h"
14 : #include "spdk/util.h"
15 :
16 : #include "ftl_core.h"
17 : #include "ftl_l2p_cache.h"
18 : #include "ftl_layout.h"
19 : #include "ftl_nv_cache_io.h"
20 : #include "mngt/ftl_mngt_steps.h"
21 : #include "utils/ftl_defs.h"
22 : #include "utils/ftl_addr_utils.h"
23 :
/* Context attached to a cached L2P page while a backing-device IO is in flight. */
struct ftl_l2p_cache_page_io_ctx {
	/* Owning cache, set before every page IO is issued */
	struct ftl_l2p_cache *cache;
	/* NOTE(review): appears to snapshot page->updates at IO submit time —
	 * not assigned anywhere in this chunk; confirm against the rest of the file */
	uint64_t updates;
	/* Completion to re-issue when a -ENOMEM-deferred write is retried */
	spdk_bdev_io_completion_cb cb;
	/* Wait entry used to queue on the bdev when it runs out of IO slots */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

/* Life-cycle states of an in-memory L2P page. */
enum ftl_l2p_page_state {
	L2P_CACHE_PAGE_INIT,		/* Page in memory not initialized from disk page */
	L2P_CACHE_PAGE_READY,		/* Page initialized from disk */
	L2P_CACHE_PAGE_FLUSHING,	/* Page is being flushed to disk and removed from memory */
	L2P_CACHE_PAGE_PERSISTING,	/* Page is being flushed to disk and not removed from memory */
	L2P_CACHE_PAGE_CLEARING,	/* Page is being initialized with INVALID addresses */
	L2P_CACHE_PAGE_CORRUPTED	/* Page corrupted */
};

/* An in-memory L2P page: metadata descriptor plus a pointer to its data buffer. */
struct ftl_l2p_page {
	uint64_t updates; /* Number of times an L2P entry was updated in the page since it was last persisted */
	TAILQ_HEAD(, ftl_l2p_page_wait_ctx) ppe_list; /* for deferred pins */
	TAILQ_ENTRY(ftl_l2p_page) list_entry; /* LRU list linkage */
	uint64_t page_no;
	enum ftl_l2p_page_state state;
	/* Number of outstanding pins; non-zero keeps the page off the LRU list */
	uint64_t pin_ref_cnt;
	struct ftl_l2p_cache_page_io_ctx ctx;
	bool on_lru_list;
	/* FTL_BLOCK_SIZE buffer inside the l1_md region holding the page's entries */
	void *page_buffer;
	uint64_t ckpt_seq_id;
	/* This page's object id within the l2_ctx mempool (SHM-stable reference) */
	ftl_df_obj_id obj_id;
};

struct ftl_l2p_page_set;

/* One pending pin request for a single page; owned by a ftl_l2p_page_set. */
struct ftl_l2p_page_wait_ctx {
	uint16_t pg_pin_issued;
	uint16_t pg_pin_completed;
	/* Back-pointer to the page set this entry belongs to */
	struct ftl_l2p_page_set *parent;
	uint64_t pg_no;
	TAILQ_ENTRY(ftl_l2p_page_wait_ctx) list_entry;
};
63 :
/* A L2P page contains 1024 4B entries (or 512 8B ones for big drives).
 * Currently internal IO will only pin 1 LBA at a time, so only one entry should be needed.
 * User IO is split on internal xfer_size boundaries, which is currently set to 1MiB (256 blocks),
 * so one entry should also be enough.
 * TODO: We should probably revisit this though, when/if the xfer_size is based on io requirements of the
 * bottom device (e.g. RAID5F), since then big IOs (especially unaligned ones) could potentially break this.
 */
#define L2P_MAX_PAGES_TO_PIN 4

/* Tracks one pin request spanning up to L2P_MAX_PAGES_TO_PIN consecutive pages. */
struct ftl_l2p_page_set {
	uint16_t to_pin_cnt;	/* Pages this request needs pinned */
	uint16_t pinned_cnt;	/* Pages pinned so far */
	uint16_t pin_fault_cnt;	/* Pages whose pin attempt failed */
	uint8_t locked;
	uint8_t deferred;	/* Set when queued on cache->deferred_page_set_list */
	struct ftl_l2p_pin_ctx *pin_ctx;
	TAILQ_ENTRY(ftl_l2p_page_set) list_entry;
	struct ftl_l2p_page_wait_ctx entry[L2P_MAX_PAGES_TO_PIN];
};

/* L2 mapping: one entry per L2P page, holding the page's mempool object id
 * (FTL_DF_OBJ_ID_INVALID when the page is not resident in memory). */
struct ftl_l2p_l1_map_entry {
	ftl_df_obj_id page_obj_id;
};

/* Life-cycle states of the whole L2P cache. */
enum ftl_l2p_cache_state {
	L2P_CACHE_INIT,
	L2P_CACHE_RUNNING,
	L2P_CACHE_IN_SHUTDOWN,
	L2P_CACHE_SHUTDOWN_DONE,
};

/* Shared context for management processes (persist/trim): tracks progress and QD. */
struct ftl_l2p_cache_process_ctx {
	int status;	/* First error encountered, or 0 */
	ftl_l2p_cb cb;	/* User completion */
	void *cb_ctx;
	uint64_t idx;	/* Next page number to process */
	uint64_t qd;	/* IOs currently in flight for this process */
};
101 :
/* Top-level state of the DRAM-resident L2P cache. */
struct ftl_l2p_cache {
	struct spdk_ftl_dev *dev;
	/* L2 mapping table (one ftl_l2p_l1_map_entry per page), backed by l2_md */
	struct ftl_l2p_l1_map_entry *l2_mapping;
	struct ftl_md *l2_md;
	/* Backing MD for the page-descriptor pool below */
	struct ftl_md *l2_ctx_md;
	/* Pool of struct ftl_l2p_page descriptors */
	struct ftl_mempool *l2_ctx_pool;
	/* MD whose buffer provides the per-page data blocks (page_buffer) */
	struct ftl_md *l1_md;

	/* LRU of resident, unpinned pages; head = hottest, tail = eviction candidate */
	TAILQ_HEAD(l2p_lru_list, ftl_l2p_page) lru_list;
	/* TODO: A lot of / and % operations are done on this value, consider adding a shift based field and calculactions instead */
	uint64_t lbas_in_page;
	uint64_t num_pages;		/* num pages to hold the entire L2P */

	uint64_t ios_in_flight;		/* Currently in flight IOs, to determine l2p shutdown readiness */
	enum ftl_l2p_cache_state state;
	uint32_t l2_pgs_avail;		/* Free page descriptors remaining in l2_ctx_pool */
	uint32_t l2_pgs_evicting;	/* Pages currently being evicted (write in flight) */
	uint32_t l2_pgs_resident_max;
	uint32_t evict_keep;		/* Eviction target: keep this many pages free/evicting */
	struct ftl_mempool *page_sets_pool;
	TAILQ_HEAD(, ftl_l2p_page_set) deferred_page_set_list; /* for deferred page sets */

	/* Process trim in background */
	struct {
#define FTL_L2P_MAX_LAZY_TRIM_QD	1
		/* Trim queue depth */
		uint32_t qd;
		/* Currently processed page */
		uint64_t page_no;
		/* Context for page pinning */
		struct ftl_l2p_pin_ctx pin_ctx;
	} lazy_trim;

	/* This is a context for a management process */
	struct ftl_l2p_cache_process_ctx mctx;

	/* MD layout cache: Offset on a device in FTL_BLOCK_SIZE unit */
	uint64_t cache_layout_offset;

	/* MD layout cache: Device of region */
	struct spdk_bdev_desc *cache_layout_bdev_desc;

	/* MD layout cache: IO channel of region */
	struct spdk_io_channel *cache_layout_ioch;
};

typedef void (*ftl_l2p_cache_clear_cb)(struct ftl_l2p_cache *cache, int status, void *ctx_page);
typedef void (*ftl_l2p_cache_persist_cb)(struct ftl_l2p_cache *cache, int status, void *ctx_page);
typedef void (*ftl_l2p_cache_sync_cb)(struct spdk_ftl_dev *dev, int status, void *page,
				      void *user_ctx);

static bool page_set_is_done(struct ftl_l2p_page_set *page_set);
static void page_set_end(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
			 struct ftl_l2p_page_set *page_set);
static void page_out_io_retry(void *arg);
static void page_in_io_retry(void *arg);
158 :
159 : static inline void
160 0 : ftl_l2p_page_queue_wait_ctx(struct ftl_l2p_page *page,
161 : struct ftl_l2p_page_wait_ctx *ppe)
162 : {
163 0 : TAILQ_INSERT_TAIL(&page->ppe_list, ppe, list_entry);
164 0 : }
165 :
/* Size of one L1 (data) page: fixed at 4 KiB. */
static inline uint64_t
ftl_l2p_cache_get_l1_page_size(void)
{
	return 0x1000UL;
}
171 :
172 : static inline uint64_t
173 0 : ftl_l2p_cache_get_lbas_in_page(struct ftl_l2p_cache *cache)
174 : {
175 0 : return cache->lbas_in_page;
176 : }
177 :
178 : static inline size_t
179 0 : ftl_l2p_cache_get_page_all_size(void)
180 : {
181 0 : return sizeof(struct ftl_l2p_page) + ftl_l2p_cache_get_l1_page_size();
182 : }
183 :
184 : static void
185 0 : ftl_l2p_cache_lru_remove_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
186 : {
187 0 : assert(page);
188 0 : assert(page->on_lru_list);
189 :
190 0 : TAILQ_REMOVE(&cache->lru_list, page, list_entry);
191 0 : page->on_lru_list = false;
192 0 : }
193 :
194 : static void
195 0 : ftl_l2p_cache_lru_add_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
196 : {
197 0 : assert(page);
198 0 : assert(!page->on_lru_list);
199 :
200 0 : TAILQ_INSERT_HEAD(&cache->lru_list, page, list_entry);
201 :
202 0 : page->on_lru_list = true;
203 0 : }
204 :
205 : static void
206 0 : ftl_l2p_cache_lru_promote_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
207 : {
208 0 : if (!page->on_lru_list) {
209 0 : return;
210 : }
211 :
212 0 : ftl_l2p_cache_lru_remove_page(cache, page);
213 0 : ftl_l2p_cache_lru_add_page(cache, page);
214 0 : }
215 :
216 : static inline void
217 0 : ftl_l2p_cache_page_insert(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
218 : {
219 0 : struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
220 0 : assert(me);
221 :
222 0 : assert(me[page->page_no].page_obj_id == FTL_DF_OBJ_ID_INVALID);
223 0 : me[page->page_no].page_obj_id = page->obj_id;
224 0 : }
225 :
226 : static void
227 0 : ftl_l2p_cache_page_remove(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
228 : {
229 0 : struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
230 0 : assert(me);
231 0 : assert(me[page->page_no].page_obj_id != FTL_DF_OBJ_ID_INVALID);
232 0 : assert(TAILQ_EMPTY(&page->ppe_list));
233 :
234 0 : me[page->page_no].page_obj_id = FTL_DF_OBJ_ID_INVALID;
235 0 : cache->l2_pgs_avail++;
236 0 : ftl_mempool_put(cache->l2_ctx_pool, page);
237 0 : }
238 :
239 : static inline struct ftl_l2p_page *
240 0 : ftl_l2p_cache_get_coldest_page(struct ftl_l2p_cache *cache)
241 : {
242 0 : return TAILQ_LAST(&cache->lru_list, l2p_lru_list);
243 : }
244 :
245 : static inline struct ftl_l2p_page *
246 0 : ftl_l2p_cache_get_hotter_page(struct ftl_l2p_page *page)
247 : {
248 0 : return TAILQ_PREV(page, l2p_lru_list, list_entry);
249 : }
250 :
251 : static inline uint64_t
252 0 : ftl_l2p_cache_page_get_bdev_offset(struct ftl_l2p_cache *cache,
253 : struct ftl_l2p_page *page)
254 : {
255 0 : return cache->cache_layout_offset + page->page_no;
256 : }
257 :
258 : static inline struct spdk_bdev_desc *
259 0 : ftl_l2p_cache_get_bdev_desc(struct ftl_l2p_cache *cache)
260 : {
261 0 : return cache->cache_layout_bdev_desc;
262 : }
263 :
264 : static inline struct spdk_io_channel *
265 0 : ftl_l2p_cache_get_bdev_iochannel(struct ftl_l2p_cache *cache)
266 : {
267 0 : return cache->cache_layout_ioch;
268 : }
269 :
270 : static struct ftl_l2p_page *
271 0 : ftl_l2p_cache_page_alloc(struct ftl_l2p_cache *cache, size_t page_no)
272 : {
273 0 : struct ftl_l2p_page *page = ftl_mempool_get(cache->l2_ctx_pool);
274 0 : ftl_bug(!page);
275 :
276 0 : cache->l2_pgs_avail--;
277 :
278 0 : memset(page, 0, sizeof(*page));
279 :
280 0 : page->obj_id = ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page);
281 :
282 0 : page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
283 0 : cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;
284 :
285 0 : TAILQ_INIT(&page->ppe_list);
286 :
287 0 : page->page_no = page_no;
288 0 : page->state = L2P_CACHE_PAGE_INIT;
289 :
290 0 : return page;
291 0 : }
292 :
293 : static inline bool
294 0 : ftl_l2p_cache_page_can_remove(struct ftl_l2p_page *page)
295 : {
296 0 : return (!page->updates &&
297 0 : page->state != L2P_CACHE_PAGE_INIT &&
298 0 : !page->pin_ref_cnt);
299 : }
300 :
301 : static inline ftl_addr
302 0 : ftl_l2p_cache_get_addr(struct spdk_ftl_dev *dev,
303 : struct ftl_l2p_cache *cache, struct ftl_l2p_page *page, uint64_t lba)
304 : {
305 0 : return ftl_addr_load(dev, page->page_buffer, lba % cache->lbas_in_page);
306 : }
307 :
308 : static inline void
309 0 : ftl_l2p_cache_set_addr(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
310 : struct ftl_l2p_page *page, uint64_t lba, ftl_addr addr)
311 : {
312 0 : ftl_addr_store(dev, page->page_buffer, lba % cache->lbas_in_page, addr);
313 0 : }
314 :
315 : static void
316 0 : ftl_l2p_page_set_invalid(struct spdk_ftl_dev *dev, struct ftl_l2p_page *page)
317 : {
318 0 : ftl_addr addr;
319 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
320 0 : uint64_t naddr;
321 :
322 0 : page->updates++;
323 :
324 0 : naddr = ftl_l2p_cache_get_lbas_in_page(cache);
325 0 : for (uint64_t i = 0; i < naddr; i++) {
326 0 : addr = ftl_addr_load(dev, page->page_buffer, i);
327 0 : if (addr == FTL_ADDR_INVALID) {
328 0 : continue;
329 : }
330 :
331 0 : ftl_invalidate_addr(dev, addr);
332 0 : ftl_l2p_cache_set_addr(dev, cache, page, i, FTL_ADDR_INVALID);
333 0 : }
334 0 : }
335 :
336 : static inline void
337 0 : ftl_l2p_cache_page_pin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
338 : {
339 0 : page->pin_ref_cnt++;
340 : /* Pinned pages can't be evicted (since L2P sets/gets will be executed on it), so remove them from LRU */
341 0 : if (page->on_lru_list) {
342 0 : ftl_l2p_cache_lru_remove_page(cache, page);
343 0 : }
344 0 : }
345 :
346 : static inline void
347 0 : ftl_l2p_cache_page_unpin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
348 : {
349 0 : page->pin_ref_cnt--;
350 0 : if (!page->pin_ref_cnt && !page->on_lru_list && page->state != L2P_CACHE_PAGE_FLUSHING) {
351 : /* L2P_CACHE_PAGE_FLUSHING: the page is currently being evicted.
352 : * In such a case, the page can't be returned to the rank list, because
353 : * the ongoing eviction will remove it if no pg updates had happened.
354 : * Moreover, the page could make it to the top of the rank list and be
355 : * selected for another eviction, while the ongoing one did not finish yet.
356 : *
357 : * Depending on the page updates tracker, the page will be evicted
358 : * or returned to the rank list in context of the eviction completion
359 : * cb - see page_out_io_complete().
360 : */
361 0 : ftl_l2p_cache_lru_add_page(cache, page);
362 0 : }
363 0 : }
364 :
365 : static inline bool
366 0 : ftl_l2p_cache_page_can_evict(struct ftl_l2p_page *page)
367 : {
368 0 : return (page->state == L2P_CACHE_PAGE_FLUSHING ||
369 0 : page->state == L2P_CACHE_PAGE_PERSISTING ||
370 0 : page->state == L2P_CACHE_PAGE_INIT ||
371 0 : page->pin_ref_cnt) ? false : true;
372 : }
373 :
374 : static bool
375 0 : ftl_l2p_cache_evict_continue(struct ftl_l2p_cache *cache)
376 : {
377 0 : return cache->l2_pgs_avail + cache->l2_pgs_evicting < cache->evict_keep;
378 : }
379 :
380 : static void *
381 0 : _ftl_l2p_cache_init(struct spdk_ftl_dev *dev, size_t addr_size, uint64_t l2p_size)
382 : {
383 0 : struct ftl_l2p_cache *cache;
384 0 : uint64_t l2_pages = spdk_divide_round_up(l2p_size, ftl_l2p_cache_get_l1_page_size());
385 0 : size_t l2_size = l2_pages * sizeof(struct ftl_l2p_l1_map_entry);
386 :
387 0 : cache = calloc(1, sizeof(struct ftl_l2p_cache));
388 0 : if (cache == NULL) {
389 0 : return NULL;
390 : }
391 0 : cache->dev = dev;
392 :
393 0 : cache->l2_md = ftl_md_create(dev,
394 0 : spdk_divide_round_up(l2_size, FTL_BLOCK_SIZE), 0,
395 : FTL_L2P_CACHE_MD_NAME_L2,
396 0 : ftl_md_create_shm_flags(dev), NULL);
397 :
398 0 : if (cache->l2_md == NULL) {
399 0 : goto fail_l2_md;
400 : }
401 0 : cache->l2_mapping = ftl_md_get_buffer(cache->l2_md);
402 :
403 0 : cache->lbas_in_page = dev->layout.l2p.lbas_in_page;
404 0 : cache->num_pages = l2_pages;
405 :
406 0 : return cache;
407 : fail_l2_md:
408 0 : free(cache);
409 0 : return NULL;
410 0 : }
411 :
412 : static struct ftl_l2p_page *
413 0 : get_l2p_page_by_df_id(struct ftl_l2p_cache *cache, size_t page_no)
414 : {
415 0 : struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
416 0 : ftl_df_obj_id obj_id = me[page_no].page_obj_id;
417 :
418 0 : if (obj_id != FTL_DF_OBJ_ID_INVALID) {
419 0 : return ftl_mempool_get_df_ptr(cache->l2_ctx_pool, obj_id);
420 : }
421 :
422 0 : return NULL;
423 0 : }
424 :
425 : int
426 0 : ftl_l2p_cache_init(struct spdk_ftl_dev *dev)
427 : {
428 0 : uint64_t l2p_size = dev->num_lbas * dev->layout.l2p.addr_size;
429 0 : struct ftl_l2p_cache *cache;
430 0 : const struct ftl_layout_region *reg;
431 0 : void *l2p = _ftl_l2p_cache_init(dev, dev->layout.l2p.addr_size, l2p_size);
432 0 : size_t page_sets_pool_size = 1 << 15;
433 0 : size_t max_resident_size, max_resident_pgs;
434 :
435 0 : if (!l2p) {
436 0 : return -1;
437 : }
438 0 : dev->l2p = l2p;
439 :
440 0 : cache = (struct ftl_l2p_cache *)dev->l2p;
441 0 : cache->page_sets_pool = ftl_mempool_create(page_sets_pool_size,
442 : sizeof(struct ftl_l2p_page_set),
443 : 64, SPDK_ENV_NUMA_ID_ANY);
444 0 : if (!cache->page_sets_pool) {
445 0 : return -1;
446 : }
447 :
448 0 : max_resident_size = dev->conf.l2p_dram_limit << 20;
449 0 : max_resident_pgs = max_resident_size / ftl_l2p_cache_get_page_all_size();
450 :
451 0 : if (max_resident_pgs > cache->num_pages) {
452 0 : SPDK_NOTICELOG("l2p memory limit higher than entire L2P size\n");
453 0 : max_resident_pgs = cache->num_pages;
454 0 : }
455 :
456 : /* Round down max res pgs to the nearest # of l2/l1 pgs */
457 0 : max_resident_size = max_resident_pgs * ftl_l2p_cache_get_page_all_size();
458 0 : SPDK_NOTICELOG("l2p maximum resident size is: %"PRIu64" (of %"PRIu64") MiB\n",
459 : max_resident_size >> 20, dev->conf.l2p_dram_limit);
460 :
461 0 : TAILQ_INIT(&cache->deferred_page_set_list);
462 0 : TAILQ_INIT(&cache->lru_list);
463 :
464 0 : cache->l2_ctx_md = ftl_md_create(dev,
465 0 : spdk_divide_round_up(max_resident_pgs * SPDK_ALIGN_CEIL(sizeof(struct ftl_l2p_page), 64),
466 0 : FTL_BLOCK_SIZE), 0, FTL_L2P_CACHE_MD_NAME_L2_CTX, ftl_md_create_shm_flags(dev), NULL);
467 :
468 0 : if (cache->l2_ctx_md == NULL) {
469 0 : return -1;
470 : }
471 :
472 0 : cache->l2_pgs_resident_max = max_resident_pgs;
473 0 : cache->l2_pgs_avail = max_resident_pgs;
474 0 : cache->l2_pgs_evicting = 0;
475 0 : cache->l2_ctx_pool = ftl_mempool_create_ext(ftl_md_get_buffer(cache->l2_ctx_md),
476 0 : max_resident_pgs, sizeof(struct ftl_l2p_page), 64);
477 :
478 0 : if (cache->l2_ctx_pool == NULL) {
479 0 : return -1;
480 : }
481 :
482 : #define FTL_L2P_CACHE_PAGE_AVAIL_MAX 16UL << 10
483 : #define FTL_L2P_CACHE_PAGE_AVAIL_RATIO 5UL
484 0 : cache->evict_keep = spdk_divide_round_up(cache->num_pages * FTL_L2P_CACHE_PAGE_AVAIL_RATIO, 100);
485 0 : cache->evict_keep = spdk_min(FTL_L2P_CACHE_PAGE_AVAIL_MAX, cache->evict_keep);
486 :
487 0 : if (!ftl_fast_startup(dev) && !ftl_fast_recovery(dev)) {
488 0 : memset(cache->l2_mapping, (int)FTL_DF_OBJ_ID_INVALID, ftl_md_get_buffer_size(cache->l2_md));
489 0 : ftl_mempool_initialize_ext(cache->l2_ctx_pool);
490 0 : }
491 :
492 0 : cache->l1_md = ftl_md_create(dev,
493 0 : max_resident_pgs, 0,
494 : FTL_L2P_CACHE_MD_NAME_L1,
495 0 : ftl_md_create_shm_flags(dev), NULL);
496 :
497 0 : if (cache->l1_md == NULL) {
498 0 : return -1;
499 : }
500 :
501 : /* Cache MD layout */
502 0 : reg = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_L2P];
503 0 : cache->cache_layout_offset = reg->current.offset;
504 0 : cache->cache_layout_bdev_desc = reg->bdev_desc;
505 0 : cache->cache_layout_ioch = reg->ioch;
506 :
507 0 : return 0;
508 0 : }
509 :
510 : static void
511 0 : ftl_l2p_cache_deinit_l2(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
512 : {
513 0 : ftl_md_destroy(cache->l2_ctx_md, ftl_md_destroy_shm_flags(dev));
514 0 : cache->l2_ctx_md = NULL;
515 :
516 0 : ftl_mempool_destroy_ext(cache->l2_ctx_pool);
517 0 : cache->l2_ctx_pool = NULL;
518 :
519 0 : ftl_md_destroy(cache->l1_md, ftl_md_destroy_shm_flags(dev));
520 0 : cache->l1_md = NULL;
521 :
522 0 : ftl_mempool_destroy(cache->page_sets_pool);
523 0 : cache->page_sets_pool = NULL;
524 0 : }
525 :
526 : static void
527 0 : _ftl_l2p_cache_deinit(struct spdk_ftl_dev *dev)
528 : {
529 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
530 :
531 0 : ftl_l2p_cache_deinit_l2(dev, cache);
532 0 : ftl_md_destroy(cache->l2_md, ftl_md_destroy_shm_flags(dev));
533 0 : free(cache);
534 0 : }
535 :
536 : void
537 0 : ftl_l2p_cache_deinit(struct spdk_ftl_dev *dev)
538 : {
539 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
540 :
541 0 : if (!cache) {
542 0 : return;
543 : }
544 0 : assert(cache->state == L2P_CACHE_SHUTDOWN_DONE || cache->state == L2P_CACHE_INIT);
545 :
546 0 : _ftl_l2p_cache_deinit(dev);
547 0 : dev->l2p = 0;
548 0 : }
549 :
550 : static void
551 0 : process_init_ctx(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
552 : ftl_l2p_cb cb, void *cb_ctx)
553 : {
554 0 : struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
555 :
556 0 : assert(NULL == ctx->cb_ctx);
557 0 : assert(0 == cache->l2_pgs_evicting);
558 :
559 0 : memset(ctx, 0, sizeof(*ctx));
560 :
561 0 : ctx->cb = cb;
562 0 : ctx->cb_ctx = cb_ctx;
563 0 : }
564 :
565 : static void
566 0 : process_finish(struct ftl_l2p_cache *cache)
567 : {
568 0 : struct ftl_l2p_cache_process_ctx ctx = cache->mctx;
569 :
570 0 : assert(cache->l2_pgs_avail == cache->l2_pgs_resident_max);
571 0 : assert(0 == ctx.qd);
572 :
573 0 : memset(&cache->mctx, 0, sizeof(cache->mctx));
574 0 : ctx.cb(cache->dev, ctx.status, ctx.cb_ctx);
575 0 : }
576 :
577 : static void process_page_out_retry(void *_page);
578 : static void process_persist(struct ftl_l2p_cache *cache);
579 :
580 : static void
581 0 : process_page_in(struct ftl_l2p_page *page, spdk_bdev_io_completion_cb cb)
582 : {
583 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
584 0 : int rc;
585 :
586 0 : assert(page->page_buffer);
587 :
588 0 : rc = ftl_nv_cache_bdev_read_blocks_with_md(cache->dev, ftl_l2p_cache_get_bdev_desc(cache),
589 0 : ftl_l2p_cache_get_bdev_iochannel(cache),
590 0 : page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
591 0 : 1, cb, page);
592 :
593 0 : if (rc) {
594 0 : cb(NULL, false, page);
595 0 : }
596 0 : }
597 :
598 : static void
599 0 : process_persist_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
600 : {
601 0 : struct ftl_l2p_page *page = arg;
602 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
603 0 : struct spdk_ftl_dev *dev = cache->dev;
604 0 : struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
605 :
606 0 : assert(bdev_io);
607 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_L2P, bdev_io);
608 0 : spdk_bdev_free_io(bdev_io);
609 :
610 0 : if (!success) {
611 0 : ctx->status = -EIO;
612 0 : }
613 :
614 0 : if (ftl_bitmap_get(dev->trim_map, ctx->idx)) {
615 : /*
616 : * Page had been trimmed, in persist path before IO, it was invalidated entirely
617 : * now clear trim flag
618 : */
619 0 : ftl_bitmap_clear(dev->trim_map, page->page_no);
620 0 : }
621 0 : ftl_l2p_cache_page_remove(cache, page);
622 :
623 0 : ctx->qd--;
624 0 : process_persist(cache);
625 0 : }
626 :
627 : static void
628 0 : process_page_out(struct ftl_l2p_page *page, spdk_bdev_io_completion_cb cb)
629 : {
630 0 : struct spdk_bdev *bdev;
631 0 : struct spdk_bdev_io_wait_entry *bdev_io_wait;
632 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
633 0 : struct spdk_ftl_dev *dev = cache->dev;
634 0 : int rc;
635 :
636 0 : assert(page->page_buffer);
637 :
638 0 : rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, ftl_l2p_cache_get_bdev_desc(cache),
639 0 : ftl_l2p_cache_get_bdev_iochannel(cache),
640 0 : page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
641 0 : 1, cb, page);
642 :
643 0 : if (spdk_likely(0 == rc)) {
644 0 : return;
645 : }
646 :
647 0 : if (rc == -ENOMEM) {
648 0 : bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
649 0 : bdev_io_wait = &page->ctx.bdev_io_wait;
650 0 : bdev_io_wait->bdev = bdev;
651 0 : bdev_io_wait->cb_fn = process_page_out_retry;
652 0 : bdev_io_wait->cb_arg = page;
653 0 : page->ctx.cb = cb;
654 :
655 0 : rc = spdk_bdev_queue_io_wait(bdev, ftl_l2p_cache_get_bdev_iochannel(cache), bdev_io_wait);
656 0 : ftl_bug(rc);
657 0 : } else {
658 0 : ftl_abort();
659 : }
660 0 : }
661 :
662 : static void
663 0 : process_page_out_retry(void *_page)
664 : {
665 0 : struct ftl_l2p_page *page = _page;
666 :
667 0 : process_page_out(page, page->ctx.cb);
668 0 : }
669 :
670 : static void process_trim(struct ftl_l2p_cache *cache);
671 :
672 : static void
673 0 : process_trim_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
674 : {
675 0 : struct ftl_l2p_page *page = (struct ftl_l2p_page *)ctx_page;
676 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
677 0 : struct spdk_ftl_dev *dev = cache->dev;
678 0 : struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
679 :
680 0 : assert(bdev_io);
681 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_L2P, bdev_io);
682 0 : spdk_bdev_free_io(bdev_io);
683 :
684 0 : if (!success) {
685 0 : ctx->status = -EIO;
686 0 : }
687 :
688 0 : assert(!page->on_lru_list);
689 0 : assert(ftl_bitmap_get(dev->trim_map, page->page_no));
690 0 : ftl_bitmap_clear(dev->trim_map, page->page_no);
691 0 : ftl_l2p_cache_page_remove(cache, page);
692 :
693 0 : ctx->qd--;
694 0 : process_trim(cache);
695 0 : }
696 :
697 : static void
698 0 : process_trim_page_in_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
699 : {
700 0 : struct ftl_l2p_page *page = (struct ftl_l2p_page *)ctx_page;
701 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
702 0 : struct spdk_ftl_dev *dev = cache->dev;
703 0 : struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
704 :
705 0 : if (bdev_io) {
706 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_L2P, bdev_io);
707 0 : spdk_bdev_free_io(bdev_io);
708 0 : }
709 0 : if (success) {
710 0 : assert(ftl_bitmap_get(dev->trim_map, page->page_no));
711 0 : ftl_l2p_page_set_invalid(dev, page);
712 0 : process_page_out(page, process_trim_page_out_cb);
713 0 : } else {
714 0 : ctx->status = -EIO;
715 0 : ctx->qd--;
716 0 : process_trim(cache);
717 : }
718 0 : }
719 :
/* Drive the background trim process: walk all page numbers, and for each one
 * whose trim bit is set, read the page in, invalidate it and write it back
 * (see process_trim_page_in_cb/process_trim_page_out_cb). Runs up to 64 IOs
 * in parallel; re-entered from each IO completion until the walk finishes. */
static void
process_trim(struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	while (ctx->idx < cache->num_pages && ctx->qd < 64) {
		struct ftl_l2p_page *page;

		if (!ftl_bitmap_get(cache->dev->trim_map, ctx->idx)) {
			/* Page had not been trimmed, continue */
			ctx->idx++;
			continue;
		}

		/* All pages were removed in persist phase */
		assert(get_l2p_page_by_df_id(cache, ctx->idx) == NULL);

		/* Allocate page to invalidate it */
		page = ftl_l2p_cache_page_alloc(cache, ctx->idx);
		if (!page) {
			/* All pages utilized so far, continue when they will be back available */
			assert(ctx->qd);
			break;
		}

		/* CLEARING marks the page as mid-invalidation until written back */
		page->state = L2P_CACHE_PAGE_CLEARING;
		page->ctx.cache = cache;

		ftl_l2p_cache_page_insert(cache, page);
		process_page_in(page, process_trim_page_in_cb);

		ctx->qd++;
		ctx->idx++;
	}

	/* No IOs in flight and loop exhausted — the trim process is complete */
	if (0 == ctx->qd) {
		process_finish(cache);
	}
}
759 :
760 : void
761 0 : ftl_l2p_cache_trim(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
762 : {
763 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
764 :
765 0 : process_init_ctx(dev, cache, cb, cb_ctx);
766 0 : process_trim(cache);
767 0 : }
768 :
769 : static void
770 0 : clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
771 : {
772 0 : ftl_l2p_cb cb = md->owner.private;
773 0 : void *cb_cntx = md->owner.cb_ctx;
774 :
775 0 : cb(dev, status, cb_cntx);
776 0 : }
777 :
778 : void
779 0 : ftl_l2p_cache_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
780 : {
781 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_L2P];
782 0 : ftl_addr invalid_addr = FTL_ADDR_INVALID;
783 :
784 0 : md->cb = clear_cb;
785 0 : md->owner.cb_ctx = cb_ctx;
786 0 : md->owner.private = cb;
787 :
788 0 : ftl_md_clear(md, invalid_addr, NULL);
789 0 : }
790 :
/* Fast (clean) startup from shared memory: every page referenced by the L2
 * mapping survived shutdown intact. Re-claim each descriptor from the pool,
 * recompute its buffer pointer (process mapping may differ across restarts),
 * reset runtime-only fields and put the page back on the LRU list. */
static void
l2p_shm_restore_clean(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	struct ftl_l2p_page *page;
	ftl_df_obj_id obj_id;
	uint64_t page_no;

	for (page_no = 0; page_no < cache->num_pages; ++page_no) {
		obj_id = me[page_no].page_obj_id;
		if (obj_id == FTL_DF_OBJ_ID_INVALID) {
			/* Page was not resident at shutdown */
			continue;
		}

		page = ftl_mempool_claim_df(cache->l2_ctx_pool, obj_id);
		assert(page);
		assert(page->obj_id == ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page));
		assert(page->page_no == page_no);
		/* A clean shutdown never leaves pages mid-load or mid-clear */
		assert(page->state != L2P_CACHE_PAGE_INIT);
		assert(page->state != L2P_CACHE_PAGE_CLEARING);
		assert(cache->l2_pgs_avail > 0);
		cache->l2_pgs_avail--;

		/* Buffer address must be recomputed for this process's l1_md mapping */
		page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
					    cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;

		TAILQ_INIT(&page->ppe_list);

		/* Runtime-only state does not survive a restart */
		page->pin_ref_cnt = 0;
		page->on_lru_list = 0;
		memset(&page->ctx, 0, sizeof(page->ctx));

		ftl_l2p_cache_lru_add_page(cache, page);
	}

	/* Rebuild the pool's free list around the claimed descriptors */
	ftl_mempool_initialize_ext(cache->l2_ctx_pool);
}
829 :
/* Fast recovery from shared memory after a crash: like the clean path, but
 * pages caught in INIT (never loaded) are discarded, and every surviving page
 * is conservatively marked dirty (updates = 1) so it gets re-persisted. */
static void
l2p_shm_restore_dirty(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	struct ftl_l2p_page *page;
	ftl_df_obj_id obj_id;
	uint64_t page_no;

	for (page_no = 0; page_no < cache->num_pages; ++page_no) {
		obj_id = me[page_no].page_obj_id;
		if (obj_id == FTL_DF_OBJ_ID_INVALID) {
			/* Page was not resident at crash time */
			continue;
		}

		page = ftl_mempool_claim_df(cache->l2_ctx_pool, obj_id);
		assert(page);
		assert(page->obj_id == ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page));
		assert(page->page_no == page_no);
		assert(page->state != L2P_CACHE_PAGE_CLEARING);
		assert(cache->l2_pgs_avail > 0);
		cache->l2_pgs_avail--;

		if (page->state == L2P_CACHE_PAGE_INIT) {
			/* Load never completed — drop the page and free its slot */
			me[page_no].page_obj_id = FTL_DF_OBJ_ID_INVALID;
			cache->l2_pgs_avail++;
			ftl_mempool_release_df(cache->l2_ctx_pool, obj_id);
			continue;
		}

		page->state = L2P_CACHE_PAGE_READY;
		/* Assume page is dirty after crash */
		page->updates = 1;
		/* Buffer address must be recomputed for this process's l1_md mapping */
		page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
					    cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;

		TAILQ_INIT(&page->ppe_list);

		/* Runtime-only state does not survive a restart */
		page->pin_ref_cnt = 0;
		page->on_lru_list = 0;
		memset(&page->ctx, 0, sizeof(page->ctx));

		ftl_l2p_cache_lru_add_page(cache, page);
	}

	/* Rebuild the pool's free list around the claimed descriptors */
	ftl_mempool_initialize_ext(cache->l2_ctx_pool);
}
877 :
878 : void
879 0 : ftl_l2p_cache_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
880 : {
881 0 : if (ftl_fast_startup(dev)) {
882 0 : l2p_shm_restore_clean(dev);
883 0 : }
884 :
885 0 : if (ftl_fast_recovery(dev)) {
886 0 : l2p_shm_restore_dirty(dev);
887 0 : }
888 :
889 0 : cb(dev, 0, cb_ctx);
890 0 : }
891 :
892 : static void
893 0 : process_persist(struct ftl_l2p_cache *cache)
894 : {
895 0 : struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
896 0 : struct spdk_ftl_dev *dev = cache->dev;
897 :
898 0 : while (ctx->idx < cache->num_pages && ctx->qd < 64) {
899 0 : struct ftl_l2p_page *page = get_l2p_page_by_df_id(cache, ctx->idx);
900 0 : ctx->idx++;
901 :
902 0 : if (!page) {
903 0 : continue;
904 : }
905 :
906 : /* Finish trim if the page was marked */
907 0 : if (ftl_bitmap_get(dev->trim_map, ctx->idx)) {
908 0 : ftl_l2p_page_set_invalid(dev, page);
909 0 : }
910 :
911 0 : if (page->on_lru_list) {
912 0 : ftl_l2p_cache_lru_remove_page(cache, page);
913 0 : }
914 :
915 0 : if (page->updates) {
916 : /* Need to persist the page */
917 0 : page->state = L2P_CACHE_PAGE_PERSISTING;
918 0 : page->ctx.cache = cache;
919 0 : ctx->qd++;
920 0 : process_page_out(page, process_persist_page_out_cb);
921 0 : } else {
922 0 : ftl_l2p_cache_page_remove(cache, page);
923 : }
924 0 : }
925 :
926 0 : if (0 == ctx->qd) {
927 0 : process_finish(cache);
928 0 : }
929 0 : }
930 :
931 : void
932 0 : ftl_l2p_cache_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
933 : {
934 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
935 :
936 0 : process_init_ctx(dev, cache, cb, cb_ctx);
937 0 : process_persist(cache);
938 0 : }
939 :
940 : bool
941 0 : ftl_l2p_cache_is_halted(struct spdk_ftl_dev *dev)
942 : {
943 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
944 :
945 0 : return cache->state == L2P_CACHE_SHUTDOWN_DONE;
946 0 : }
947 :
948 : void
949 0 : ftl_l2p_cache_halt(struct spdk_ftl_dev *dev)
950 : {
951 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
952 :
953 0 : if (cache->state != L2P_CACHE_SHUTDOWN_DONE) {
954 0 : cache->state = L2P_CACHE_IN_SHUTDOWN;
955 0 : if (!cache->ios_in_flight && !cache->l2_pgs_evicting) {
956 0 : cache->state = L2P_CACHE_SHUTDOWN_DONE;
957 0 : }
958 0 : }
959 0 : }
960 :
961 : void
962 0 : ftl_l2p_cache_resume(struct spdk_ftl_dev *dev)
963 : {
964 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
965 :
966 0 : assert(cache->state == L2P_CACHE_INIT);
967 0 : cache->state = L2P_CACHE_RUNNING;
968 0 : }
969 :
970 : static inline struct ftl_l2p_page *
971 0 : get_page(struct ftl_l2p_cache *cache, uint64_t lba)
972 : {
973 0 : return get_l2p_page_by_df_id(cache, lba / cache->lbas_in_page);
974 : }
975 :
976 : static inline void
977 0 : ftl_l2p_cache_init_page_set(struct ftl_l2p_page_set *page_set, struct ftl_l2p_pin_ctx *pin_ctx)
978 : {
979 0 : page_set->to_pin_cnt = 0;
980 0 : page_set->pinned_cnt = 0;
981 0 : page_set->pin_fault_cnt = 0;
982 0 : page_set->locked = 0;
983 0 : page_set->deferred = 0;
984 0 : page_set->pin_ctx = pin_ctx;
985 0 : }
986 :
987 : static inline bool
988 0 : ftl_l2p_cache_running(struct ftl_l2p_cache *cache)
989 : {
990 0 : return cache->state == L2P_CACHE_RUNNING;
991 : }
992 :
993 : static inline bool
994 0 : ftl_l2p_cache_page_is_pinnable(struct ftl_l2p_page *page)
995 : {
996 0 : return page->state != L2P_CACHE_PAGE_INIT;
997 : }
998 :
/*
 * Pin every L2P page covering [pin_ctx->lba, pin_ctx->lba + pin_ctx->count).
 * Completion is reported via ftl_l2p_pin_complete(): synchronously with
 * -E2BIG/-EAGAIN on resource limits, synchronously with 0 if all pages were
 * already resident and pinnable, or asynchronously once pending page reads
 * finish (or once the deferred page set is processed by the poller).
 */
void
ftl_l2p_cache_pin(struct spdk_ftl_dev *dev, struct ftl_l2p_pin_ctx *pin_ctx)
{
	assert(dev->num_lbas >= pin_ctx->lba + pin_ctx->count);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page_set *page_set;
	bool defer_pin = false;

	/* Calculate first and last page to pin, count of them */
	uint64_t start = pin_ctx->lba / cache->lbas_in_page;
	uint64_t end = (pin_ctx->lba + pin_ctx->count - 1) / cache->lbas_in_page;
	uint64_t count = end - start + 1;
	uint64_t i;

	if (spdk_unlikely(count > L2P_MAX_PAGES_TO_PIN)) {
		/* Request spans more pages than one page set can track */
		ftl_l2p_pin_complete(dev, -E2BIG, pin_ctx);
		return;
	}

	/* Get and initialize page sets */
	assert(ftl_l2p_cache_running(cache));
	page_set = ftl_mempool_get(cache->page_sets_pool);
	if (!page_set) {
		/* Page set pool exhausted - caller is expected to retry later */
		ftl_l2p_pin_complete(dev, -EAGAIN, pin_ctx);
		return;
	}
	ftl_l2p_cache_init_page_set(page_set, pin_ctx);

	struct ftl_l2p_page_wait_ctx *entry = page_set->entry;
	for (i = start; i <= end; i++, entry++) {
		struct ftl_l2p_page *page;
		entry->parent = page_set;
		entry->pg_no = i;
		entry->pg_pin_completed = false;
		entry->pg_pin_issued = false;

		page_set->to_pin_cnt++;

		/* Try get page and pin */
		page = get_l2p_page_by_df_id(cache, i);
		if (page) {
			if (ftl_l2p_cache_page_is_pinnable(page)) {
				/* Page available and we can pin it */
				page_set->pinned_cnt++;
				entry->pg_pin_issued = true;
				entry->pg_pin_completed = true;
				ftl_l2p_cache_page_pin(cache, page);
			} else {
				/* The page is being loaded */
				/* Queue the page pin entry to be executed on page in */
				ftl_l2p_page_queue_wait_ctx(page, entry);
				entry->pg_pin_issued = true;
			}
		} else {
			/* The page is not in the cache, queue the page_set to page in */
			defer_pin = true;
		}
	}

	/* Check if page set is done */
	if (page_set_is_done(page_set)) {
		page_set_end(dev, cache, page_set);
	} else if (defer_pin) {
		/* At least one page must be read in - park the page set for the
		 * poller (ftl_l2p_cache_process_page_sets) to issue the reads.
		 */
		TAILQ_INSERT_TAIL(&cache->deferred_page_set_list, page_set, list_entry);
		page_set->deferred = 1;
	}
}
1066 :
1067 : void
1068 0 : ftl_l2p_cache_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
1069 : {
1070 0 : assert(dev->num_lbas >= lba + count);
1071 0 : struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
1072 0 : struct ftl_l2p_page *page;
1073 0 : uint64_t start = lba / cache->lbas_in_page;
1074 0 : uint64_t end = (lba + count - 1) / cache->lbas_in_page;
1075 0 : uint64_t i;
1076 :
1077 0 : assert(count);
1078 0 : assert(start < cache->num_pages);
1079 0 : assert(end < cache->num_pages);
1080 :
1081 0 : for (i = start; i <= end; i++) {
1082 0 : page = get_l2p_page_by_df_id(cache, i);
1083 0 : ftl_bug(!page);
1084 0 : ftl_l2p_cache_page_unpin(cache, page);
1085 0 : }
1086 0 : }
1087 :
/*
 * Return the physical address currently mapped to the given LBA.
 * The covering page must already be pinned by the caller.
 */
ftl_addr
ftl_l2p_cache_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page *page = get_page(cache, lba);
	ftl_addr addr;

	ftl_bug(!page);
	assert(ftl_l2p_cache_running(cache));
	assert(page->pin_ref_cnt);

	/* Lazy trim: if this page was marked in the trim map, invalidate all of
	 * its entries now and clear the mark before reading the mapping.
	 */
	if (ftl_bitmap_get(dev->trim_map, page->page_no)) {
		ftl_l2p_page_set_invalid(dev, page);
		ftl_bitmap_clear(dev->trim_map, page->page_no);
	}

	/* Accessing the page makes it the most-recently-used one */
	ftl_l2p_cache_lru_promote_page(cache, page);
	addr = ftl_l2p_cache_get_addr(dev, cache, page, lba);

	return addr;
}
1110 :
/*
 * Map the given LBA to a new physical address.
 * The covering page must already be pinned by the caller.
 */
void
ftl_l2p_cache_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	assert(dev->num_lbas > lba);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page *page = get_page(cache, lba);

	ftl_bug(!page);
	assert(ftl_l2p_cache_running(cache));
	assert(page->pin_ref_cnt);

	/* Lazy trim: apply a pending whole-page trim before overwriting one entry,
	 * so the other entries of the page are correctly invalidated first.
	 */
	if (ftl_bitmap_get(dev->trim_map, page->page_no)) {
		ftl_l2p_page_set_invalid(dev, page);
		ftl_bitmap_clear(dev->trim_map, page->page_no);
	}

	/* Mark the page dirty so eviction/persist will write it out */
	page->updates++;
	ftl_l2p_cache_lru_promote_page(cache, page);
	ftl_l2p_cache_set_addr(dev, cache, page, lba, addr);
}
1131 :
/* Allocate a cache page object for page_no and make it visible in the page map. */
static struct ftl_l2p_page *
page_allocate(struct ftl_l2p_cache *cache, uint64_t page_no)
{
	struct ftl_l2p_page *new_page;

	new_page = ftl_l2p_cache_page_alloc(cache, page_no);
	ftl_l2p_cache_page_insert(cache, new_page);

	return new_page;
}
1140 :
1141 : static bool
1142 0 : page_set_is_done(struct ftl_l2p_page_set *page_set)
1143 : {
1144 0 : if (page_set->locked) {
1145 0 : return false;
1146 : }
1147 :
1148 0 : assert(page_set->pinned_cnt + page_set->pin_fault_cnt <= page_set->to_pin_cnt);
1149 0 : return (page_set->pinned_cnt + page_set->pin_fault_cnt == page_set->to_pin_cnt);
1150 0 : }
1151 :
1152 : static void
1153 0 : page_set_unpin(struct ftl_l2p_cache *cache, struct ftl_l2p_page_set *page_set)
1154 : {
1155 0 : uint64_t i;
1156 0 : struct ftl_l2p_page_wait_ctx *pentry = page_set->entry;
1157 :
1158 0 : for (i = 0; i < page_set->to_pin_cnt; i++, pentry++) {
1159 0 : struct ftl_l2p_page *pinned_page;
1160 :
1161 0 : if (false == pentry->pg_pin_completed) {
1162 0 : continue;
1163 : }
1164 :
1165 0 : pinned_page = get_l2p_page_by_df_id(cache, pentry->pg_no);
1166 0 : ftl_bug(!pinned_page);
1167 :
1168 0 : ftl_l2p_cache_page_unpin(cache, pinned_page);
1169 0 : }
1170 0 : }
1171 :
1172 : static void
1173 0 : page_set_end(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
1174 : struct ftl_l2p_page_set *page_set)
1175 : {
1176 0 : if (spdk_likely(0 == page_set->pin_fault_cnt)) {
1177 0 : ftl_l2p_pin_complete(dev, 0, page_set->pin_ctx);
1178 0 : } else {
1179 0 : page_set_unpin(cache, page_set);
1180 0 : ftl_l2p_pin_complete(dev, -EIO, page_set->pin_ctx);
1181 : }
1182 :
1183 0 : if (page_set->deferred) {
1184 0 : TAILQ_REMOVE(&cache->deferred_page_set_list, page_set, list_entry);
1185 0 : }
1186 :
1187 0 : assert(0 == page_set->locked);
1188 0 : ftl_mempool_put(cache->page_sets_pool, page_set);
1189 0 : }
1190 :
/*
 * Completion of a page read from the base device. Wakes every pin request
 * queued on the page: on success each waiter gets its pin, on failure each
 * waiter's page set records a fault and the page itself is torn down.
 */
static void
page_in_io_complete(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		    struct ftl_l2p_page *page, bool success)
{
	struct ftl_l2p_page_set *page_set;
	struct ftl_l2p_page_wait_ctx *pentry;

	cache->ios_in_flight--;

	/* The page was freshly allocated for this read - nobody may have
	 * pinned it or put it on the LRU yet.
	 */
	assert(0 == page->pin_ref_cnt);
	assert(L2P_CACHE_PAGE_INIT == page->state);
	assert(false == page->on_lru_list);

	if (spdk_likely(success)) {
		/* Mark ready before draining waiters so their pins are legal */
		page->state = L2P_CACHE_PAGE_READY;
	}

	while ((pentry = TAILQ_FIRST(&page->ppe_list))) {
		TAILQ_REMOVE(&page->ppe_list, pentry, list_entry);

		page_set = pentry->parent;

		assert(false == pentry->pg_pin_completed);

		if (success) {
			ftl_l2p_cache_page_pin(cache, page);
			page_set->pinned_cnt++;
			pentry->pg_pin_completed = true;
		} else {
			page_set->pin_fault_cnt++;
		}

		/* Check if page_set is done */
		if (page_set_is_done(page_set)) {
			page_set_end(dev, cache, page_set);
		}
	}

	if (spdk_unlikely(!success)) {
		/* Drop the unreadable page entirely; waiters were already faulted */
		ftl_bug(page->on_lru_list);
		ftl_l2p_cache_page_remove(cache, page);
	}
}
1234 :
1235 : static void
1236 0 : page_in_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1237 : {
1238 0 : struct ftl_l2p_page *page = cb_arg;
1239 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
1240 0 : struct spdk_ftl_dev *dev = cache->dev;
1241 :
1242 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_L2P, bdev_io);
1243 0 : spdk_bdev_free_io(bdev_io);
1244 0 : page_in_io_complete(dev, cache, page, success);
1245 0 : }
1246 :
/*
 * Submit the read that loads an L2P page from the base device.
 * On -ENOMEM the request is parked on the bdev's io_wait queue and retried
 * from page_in_io_retry(); any other error is fatal.
 */
static void
page_in_io(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	struct spdk_io_channel *ioch;
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_wait_entry *bdev_io_wait;
	int rc;
	page->ctx.cache = cache;

	rc = ftl_nv_cache_bdev_read_blocks_with_md(cache->dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, page_in_io_cb, page);
	/* Count the submission unconditionally - the ENOMEM retry path
	 * (page_in_io_retry) decrements before resubmitting.
	 */
	cache->ios_in_flight++;
	if (spdk_likely(0 == rc)) {
		return;
	}

	if (rc == -ENOMEM) {
		/* No bdev_io available - queue a wait entry to retry when one frees up */
		ioch = ftl_l2p_cache_get_bdev_iochannel(cache);
		bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
		bdev_io_wait = &page->ctx.bdev_io_wait;
		bdev_io_wait->bdev = bdev;
		bdev_io_wait->cb_fn = page_in_io_retry;
		bdev_io_wait->cb_arg = page;

		rc = spdk_bdev_queue_io_wait(bdev, ioch, bdev_io_wait);
		ftl_bug(rc);
	} else {
		/* Any other submission error is unrecoverable */
		ftl_abort();
	}
}
1279 :
1280 : static void
1281 0 : page_in_io_retry(void *arg)
1282 : {
1283 0 : struct ftl_l2p_page *page = arg;
1284 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
1285 0 : struct spdk_ftl_dev *dev = cache->dev;
1286 :
1287 0 : cache->ios_in_flight--;
1288 0 : page_in_io(dev, cache, page);
1289 0 : }
1290 :
1291 : static void
1292 0 : page_in(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
1293 : struct ftl_l2p_page_set *page_set, struct ftl_l2p_page_wait_ctx *pentry)
1294 : {
1295 0 : struct ftl_l2p_page *page;
1296 0 : bool page_in = false;
1297 :
1298 : /* Get page */
1299 0 : page = get_l2p_page_by_df_id(cache, pentry->pg_no);
1300 0 : if (!page) {
1301 : /* Page not allocated yet, do it */
1302 0 : page = page_allocate(cache, pentry->pg_no);
1303 0 : page_in = true;
1304 0 : }
1305 :
1306 0 : if (ftl_l2p_cache_page_is_pinnable(page)) {
1307 0 : ftl_l2p_cache_page_pin(cache, page);
1308 0 : page_set->pinned_cnt++;
1309 0 : pentry->pg_pin_issued = true;
1310 0 : pentry->pg_pin_completed = true;
1311 0 : } else {
1312 0 : pentry->pg_pin_issued = true;
1313 0 : ftl_l2p_page_queue_wait_ctx(page, pentry);
1314 : }
1315 :
1316 0 : if (page_in) {
1317 0 : page_in_io(dev, cache, page);
1318 0 : }
1319 0 : }
1320 :
/*
 * Pick one page set off the deferred list and issue all of its outstanding
 * pins. Returns 0 on progress, -ECHILD when the list is empty, or -EBUSY
 * when resources (free pages, I/O queue depth) are currently exhausted.
 */
static int
ftl_l2p_cache_process_page_sets(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_page_set *page_set;
	struct ftl_l2p_page_wait_ctx *pentry;
	uint64_t i;

	page_set = TAILQ_FIRST(&cache->deferred_page_set_list);
	if (!page_set) {
		/* No page_set */
		return -ECHILD;
	}

	if (page_set->to_pin_cnt > cache->l2_pgs_avail) {
		/* No enough page to pin, wait */
		return -EBUSY;
	}
	if (cache->ios_in_flight > 512) {
		/* Too big QD */
		return -EBUSY;
	}

	ftl_add_io_activity(dev);

	TAILQ_REMOVE(&cache->deferred_page_set_list, page_set, list_entry);
	page_set->deferred = 0;
	/* Lock the set so completions firing inside page_in() cannot end it
	 * while we are still iterating over its entries.
	 */
	page_set->locked = 1;

	/* Now we can start pinning */
	pentry = page_set->entry;
	for (i = 0; i < page_set->to_pin_cnt; i++, pentry++) {
		if (!pentry->pg_pin_issued) {
			page_in(dev, cache, page_set, pentry);
		}
	}

	page_set->locked = 0;

	/* Check if page_set is done */
	if (page_set_is_done(page_set)) {
		page_set_end(dev, cache, page_set);
	}

	return 0;
}
1366 :
1367 : static struct ftl_l2p_page *
1368 0 : eviction_get_page(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
1369 : {
1370 0 : uint64_t i = 0;
1371 0 : struct ftl_l2p_page *page = ftl_l2p_cache_get_coldest_page(cache);
1372 :
1373 0 : while (page) {
1374 0 : ftl_bug(L2P_CACHE_PAGE_READY != page->state);
1375 0 : ftl_bug(page->pin_ref_cnt);
1376 :
1377 0 : if (ftl_l2p_cache_page_can_evict(page)) {
1378 0 : ftl_l2p_cache_lru_remove_page(cache, page);
1379 0 : return page;
1380 : }
1381 :
1382 : /*
1383 : * Practically only one iteration is needed to find a page. It is because
1384 : * the rank of pages contains only ready and unpinned pages
1385 : */
1386 0 : ftl_bug(++i > 1024);
1387 :
1388 0 : page = ftl_l2p_cache_get_hotter_page(page);
1389 : }
1390 :
1391 0 : return NULL;
1392 0 : }
1393 :
/*
 * Completion of an eviction/persist page write. Only the updates captured at
 * submission time (ctx.updates) are considered flushed - updates that arrived
 * while the write was in flight keep the page dirty.
 */
static void
page_out_io_complete(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		     struct ftl_l2p_page *page, bool success)
{
	cache->l2_pgs_evicting--;

	ftl_bug(page->ctx.updates > page->updates);
	ftl_bug(!TAILQ_EMPTY(&page->ppe_list));
	ftl_bug(page->on_lru_list);

	if (spdk_likely(success)) {
		/* Subtract only what this write flushed; newer updates remain counted */
		page->updates -= page->ctx.updates;
	}

	if (success && ftl_l2p_cache_page_can_remove(page)) {
		ftl_l2p_cache_page_remove(cache, page);
	} else {
		/* Keep the page resident (write failed, or it was pinned/updated
		 * during the flush); re-add to LRU only if it is unpinned.
		 */
		if (!page->pin_ref_cnt) {
			ftl_l2p_cache_lru_add_page(cache, page);
		}
		page->state = L2P_CACHE_PAGE_READY;
	}
}
1417 :
1418 : static void
1419 0 : page_out_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1420 : {
1421 0 : struct ftl_l2p_page *page = cb_arg;
1422 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
1423 0 : struct spdk_ftl_dev *dev = cache->dev;
1424 :
1425 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_L2P, bdev_io);
1426 0 : spdk_bdev_free_io(bdev_io);
1427 0 : page_out_io_complete(dev, cache, page, success);
1428 0 : }
1429 :
/*
 * Submit the write that flushes an L2P page to the base device.
 * On -ENOMEM the request is parked on the bdev's io_wait queue and retried
 * from page_out_io_retry(); any other error is fatal.
 */
static void
page_out_io(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
	    struct ftl_l2p_page *page)
{
	struct spdk_io_channel *ioch;
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_wait_entry *bdev_io_wait;
	int rc;

	page->ctx.cache = cache;

	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, page_out_io_cb, page);

	/* Count the submission unconditionally - the ENOMEM retry path
	 * (page_out_io_retry) decrements before resubmitting.
	 */
	cache->l2_pgs_evicting++;
	if (spdk_likely(0 == rc)) {
		return;
	}

	if (rc == -ENOMEM) {
		/* No bdev_io available - queue a wait entry to retry when one frees up */
		ioch = ftl_l2p_cache_get_bdev_iochannel(cache);
		bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
		bdev_io_wait = &page->ctx.bdev_io_wait;
		bdev_io_wait->bdev = bdev;
		bdev_io_wait->cb_fn = page_out_io_retry;
		bdev_io_wait->cb_arg = page;

		rc = spdk_bdev_queue_io_wait(bdev, ioch, bdev_io_wait);
		ftl_bug(rc);
	} else {
		/* Any other submission error is unrecoverable */
		ftl_abort();
	}
}
1465 :
1466 : static void
1467 0 : page_out_io_retry(void *arg)
1468 : {
1469 0 : struct ftl_l2p_page *page = arg;
1470 0 : struct ftl_l2p_cache *cache = page->ctx.cache;
1471 0 : struct spdk_ftl_dev *dev = cache->dev;
1472 :
1473 0 : cache->l2_pgs_evicting--;
1474 0 : page_out_io(dev, cache, page);
1475 0 : }
1476 :
1477 : static void
1478 0 : ftl_l2p_cache_process_eviction(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
1479 : {
1480 0 : struct ftl_l2p_page *page;
1481 :
1482 0 : if (!ftl_l2p_cache_evict_continue(cache)) {
1483 0 : return;
1484 : }
1485 :
1486 0 : if (cache->l2_pgs_evicting > 512) {
1487 0 : return;
1488 : }
1489 :
1490 0 : ftl_add_io_activity(dev);
1491 :
1492 0 : page = eviction_get_page(dev, cache);
1493 0 : if (spdk_unlikely(!page)) {
1494 0 : return;
1495 : }
1496 :
1497 0 : if (page->updates) {
1498 0 : page->state = L2P_CACHE_PAGE_FLUSHING;
1499 0 : page->ctx.updates = page->updates;
1500 0 : page_out_io(dev, cache, page);
1501 0 : } else {
1502 : /* Page clean and we can remove it */
1503 0 : ftl_l2p_cache_page_remove(cache, page);
1504 : }
1505 0 : }
1506 :
/*
 * Completion of a lazy-trim pin request. On success, touching the page via
 * ftl_l2p_cache_get() applies the deferred trim (invalidating the page and
 * clearing its trim_map bit) before the pin is dropped again.
 */
static void
ftl_l2p_lazy_trim_process_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_l2p_cache *cache = dev->l2p;

	cache->lazy_trim.qd--;

	/* We will retry on next ftl_l2p_lazy_trim_process */
	if (spdk_unlikely(status != 0)) {
		return;
	}

	if (ftl_l2p_cache_running(cache)) {
		/* The get side-effect performs the actual trim of the page */
		ftl_l2p_cache_get(dev, pin_ctx->lba);
	}

	ftl_l2p_cache_unpin(dev, pin_ctx->lba, pin_ctx->count);
}
1525 :
/*
 * Drive background processing of deferred (lazy) trims: scan the trim map
 * for the next marked page, pin it, and let the pin completion apply the
 * trim. The scan position wraps to the start once the end of the map is
 * reached; trim_in_progress is cleared only when a full wrap finds no bits.
 */
static void
ftl_l2p_lazy_trim_process(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = dev->l2p;
	struct ftl_l2p_pin_ctx *pin_ctx;
	uint64_t page_no;

	if (spdk_likely(!dev->trim_in_progress)) {
		return;
	}

	/* Respect the lazy-trim queue depth limit */
	if (cache->lazy_trim.qd == FTL_L2P_MAX_LAZY_TRIM_QD) {
		return;
	}

	page_no = ftl_bitmap_find_first_set(dev->trim_map, cache->lazy_trim.page_no, UINT64_MAX);
	if (page_no == UINT64_MAX) {
		cache->lazy_trim.page_no = 0;

		/* Check trim map from beginning to detect unprocessed trims */
		page_no = ftl_bitmap_find_first_set(dev->trim_map, cache->lazy_trim.page_no, UINT64_MAX);
		if (page_no == UINT64_MAX) {
			/* Whole map is clear - all trims have been applied */
			dev->trim_in_progress = false;
			return;
		}
	}

	cache->lazy_trim.page_no = page_no;

	pin_ctx = &cache->lazy_trim.pin_ctx;

	cache->lazy_trim.qd++;
	assert(cache->lazy_trim.qd <= FTL_L2P_MAX_LAZY_TRIM_QD);
	assert(page_no < cache->num_pages);

	/* Pin the first LBA of the trimmed page; the callback does the rest */
	pin_ctx->lba = page_no * cache->lbas_in_page;
	pin_ctx->count = 1;
	pin_ctx->cb = ftl_l2p_lazy_trim_process_cb;
	pin_ctx->cb_ctx = pin_ctx;

	ftl_l2p_cache_pin(dev, pin_ctx);
}
1568 :
1569 : void
1570 0 : ftl_l2p_cache_process(struct spdk_ftl_dev *dev)
1571 : {
1572 0 : struct ftl_l2p_cache *cache = dev->l2p;
1573 0 : int i;
1574 :
1575 0 : if (spdk_unlikely(cache->state != L2P_CACHE_RUNNING)) {
1576 0 : return;
1577 : }
1578 :
1579 0 : for (i = 0; i < 256; i++) {
1580 0 : if (ftl_l2p_cache_process_page_sets(dev, cache)) {
1581 0 : break;
1582 : }
1583 0 : }
1584 :
1585 0 : ftl_l2p_cache_process_eviction(dev, cache);
1586 0 : ftl_l2p_lazy_trim_process(dev);
1587 0 : }
|