Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2022 Intel Corporation.
3 : * Copyright 2023 Solidigm All Rights Reserved
4 : * All rights reserved.
5 : */
6 :
7 : #include "spdk/bdev_module.h"
8 : #include "spdk/crc32.h"
9 :
10 : #include "ftl_internal.h"
11 : #include "ftl_band.h"
12 : #include "ftl_core.h"
13 : #include "ftl_layout.h"
14 : #include "ftl_nv_cache_io.h"
15 : #include "ftl_writer.h"
16 : #include "mngt/ftl_mngt.h"
17 :
/* State of a P2L (physical-to-logical) checkpoint bound to one md layout
 * region. Checkpoints are pooled on dev->p2l_ckpt.free / .inuse and are
 * persisted incrementally as bands are written, so the band P2L map can be
 * rebuilt after a dirty shutdown.
 */
struct ftl_p2l_ckpt {
	TAILQ_ENTRY(ftl_p2l_ckpt) link;			/* entry in dev->p2l_ckpt.free or .inuse */
	union ftl_md_vss *vss_md_page;			/* VSS buffer; allocated only when NV cache carries md */
	struct ftl_md *md;				/* backing metadata object for the region */
	struct ftl_layout_region *layout_region;	/* layout region this checkpoint persists to */
	uint64_t num_pages;				/* total checkpoint pages in the region */
	uint64_t pages_per_xfer;			/* checkpoint pages covering one xfer_size of data */

#if defined(DEBUG)
	uint64_t dbg_bmp_sz;		/* bitmap size in bytes (num_pages bits, rounded up) */
	void *dbg_bmp;			/* raw storage backing the bitmap */
	struct ftl_bitmap *bmp;		/* one bit per checkpoint page already persisted */
#endif
};
32 :
33 : static struct ftl_p2l_ckpt *
34 19 : ftl_p2l_ckpt_new(struct spdk_ftl_dev *dev, int region_type)
35 : {
36 19 : struct ftl_p2l_ckpt *ckpt;
37 19 : struct ftl_layout_region *region = ftl_layout_region_get(dev, region_type);
38 :
39 19 : ckpt = calloc(1, sizeof(struct ftl_p2l_ckpt));
40 19 : if (!ckpt) {
41 0 : return NULL;
42 : }
43 :
44 19 : ckpt->layout_region = region;
45 19 : ckpt->md = dev->layout.md[region_type];
46 19 : ckpt->pages_per_xfer = dev->layout.p2l.pages_per_xfer;
47 19 : ckpt->num_pages = dev->layout.p2l.ckpt_pages;
48 19 : if (dev->nv_cache.md_size) {
49 0 : ckpt->vss_md_page = ftl_md_vss_buf_alloc(region, region->num_entries);
50 0 : if (!ckpt->vss_md_page) {
51 0 : free(ckpt);
52 0 : return NULL;
53 : }
54 0 : }
55 :
56 : #if defined(DEBUG)
57 : /* The bitmap size must be a multiple of word size (8b) - round up */
58 19 : ckpt->dbg_bmp_sz = spdk_divide_round_up(ckpt->num_pages, 8);
59 :
60 19 : ckpt->dbg_bmp = calloc(1, ckpt->dbg_bmp_sz);
61 19 : assert(ckpt->dbg_bmp);
62 19 : ckpt->bmp = ftl_bitmap_create(ckpt->dbg_bmp, ckpt->dbg_bmp_sz);
63 19 : assert(ckpt->bmp);
64 : #endif
65 :
66 19 : return ckpt;
67 19 : }
68 :
/* Free a checkpoint object and everything it owns (debug bitmap, VSS buffer).
 * spdk_dma_free() is called unconditionally; vss_md_page is NULL when the
 * NV cache carries no metadata and the call is then a no-op.
 */
static void
ftl_p2l_ckpt_destroy(struct ftl_p2l_ckpt *ckpt)
{
#if defined(DEBUG)
	ftl_bitmap_destroy(ckpt->bmp);
	free(ckpt->dbg_bmp);
#endif
	spdk_dma_free(ckpt->vss_md_page);
	free(ckpt);
}
79 :
80 : int
81 0 : ftl_p2l_ckpt_init(struct spdk_ftl_dev *dev)
82 : {
83 0 : int region_type;
84 0 : struct ftl_p2l_ckpt *ckpt;
85 :
86 0 : TAILQ_INIT(&dev->p2l_ckpt.free);
87 0 : TAILQ_INIT(&dev->p2l_ckpt.inuse);
88 0 : for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
89 0 : region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
90 0 : region_type++) {
91 0 : ckpt = ftl_p2l_ckpt_new(dev, region_type);
92 0 : if (!ckpt) {
93 0 : return -1;
94 : }
95 0 : TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
96 0 : }
97 0 : return 0;
98 0 : }
99 :
100 : void
101 0 : ftl_p2l_ckpt_deinit(struct spdk_ftl_dev *dev)
102 : {
103 0 : struct ftl_p2l_ckpt *ckpt, *ckpt_next;
104 :
105 0 : TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.free, link, ckpt_next) {
106 0 : TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
107 0 : ftl_p2l_ckpt_destroy(ckpt);
108 0 : }
109 :
110 0 : TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.inuse, link, ckpt_next) {
111 0 : TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
112 0 : ftl_p2l_ckpt_destroy(ckpt);
113 0 : }
114 0 : }
115 :
116 : struct ftl_p2l_ckpt *
117 0 : ftl_p2l_ckpt_acquire(struct spdk_ftl_dev *dev)
118 : {
119 0 : struct ftl_p2l_ckpt *ckpt;
120 :
121 0 : ckpt = TAILQ_FIRST(&dev->p2l_ckpt.free);
122 0 : assert(ckpt);
123 0 : TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
124 0 : TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);
125 0 : return ckpt;
126 0 : }
127 :
/* Return a checkpoint to the free list once its band no longer needs it.
 * In debug builds the page-written bitmap is cleared so the next user of
 * this checkpoint starts with clean validation state.
 */
void
ftl_p2l_ckpt_release(struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt)
{
	assert(ckpt);
#if defined(DEBUG)
	memset(ckpt->dbg_bmp, 0, ckpt->dbg_bmp_sz);
#endif
	TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
}
138 :
/* Completion callback for ftl_p2l_ckpt_issue(). On success, drops the band's
 * outstanding queue depth and notifies the request owner. On persistence
 * failure the request is either retried (when built with
 * SPDK_FTL_RETRY_ON_ERROR) or the device is aborted - a lost P2L checkpoint
 * page cannot be reconstructed.
 */
static void
ftl_p2l_ckpt_issue_end(int status, void *arg)
{
	struct ftl_rq *rq = arg;
	assert(rq);

	if (status) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* retry */
		ftl_md_persist_entry_retry(&rq->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	assert(rq->io.band->queue_depth > 0);
	rq->io.band->queue_depth--;

	rq->owner.cb(rq);
}
160 :
/* Persist the checkpoint pages covering the xfer just written at rq->io.addr
 * and, for entries carrying a valid LBA (compaction/reloc writes), update the
 * band's in-memory P2L map as well. Requires rq->num_blocks == xfer_size.
 * Completion is reported through ftl_p2l_ckpt_issue_end().
 */
void
ftl_p2l_ckpt_issue(struct ftl_rq *rq)
{
	struct ftl_rq_entry *iter = rq->entries;
	struct spdk_ftl_dev *dev = rq->dev;
	ftl_addr addr = rq->io.addr;
	struct ftl_p2l_ckpt *ckpt = NULL;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_band *band;
	uint64_t band_offs, p2l_map_page_no, cur_page, i, j;

	assert(rq);
	band = rq->io.band;
	ckpt = band->p2l_map.p2l_ckpt;
	assert(ckpt);
	assert(rq->num_blocks == dev->xfer_size);

	/* Derive the P2L map page no */
	band_offs = ftl_band_block_offset_from_addr(band, rq->io.addr);
	p2l_map_page_no = band_offs / dev->xfer_size * ckpt->pages_per_xfer;
	assert(p2l_map_page_no < ckpt->num_pages);

	/* Get the corresponding P2L map page - the underlying stored data has the same entries as in the end metadata of band P2L (ftl_p2l_map_entry),
	 * however we're interested in a whole page (4KiB) worth of content and submit it in two requests with additional metadata
	 */
	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);
	map_page += p2l_map_page_no;
	/* i walks rq entries across all pages of this xfer; j walks entries within one page */
	i = 0;
	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		/* Update the band P2L map */
		for (j = 0; i < rq->num_blocks && j < FTL_NUM_P2L_ENTRIES_NO_VSS; i++, iter++, j++) {
			if (iter->lba != FTL_LBA_INVALID) {
				/* This is compaction or reloc */
				assert(!ftl_addr_in_nvc(rq->dev, addr));
				ftl_band_set_p2l(band, iter->lba, addr, iter->seq_id);
			}
			/* Entries with invalid LBAs are stored too; count below records how many are meaningful */
			page->map[j].lba = iter->lba;
			page->map[j].seq_id = iter->seq_id;
			addr = ftl_band_next_addr(band, addr, 1);
		}

		/* Set up the md */
		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		page->metadata.p2l_ckpt.count = j;

#if defined(DEBUG)
		ftl_bitmap_set(ckpt->bmp, p2l_map_page_no + cur_page);
#endif
		/* Checksum covers the entire page map area, not just the count entries filled above */
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
				FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
	}
	/* Save the P2L map entry */
	ftl_md_persist_entries(ckpt->md, p2l_map_page_no, ckpt->pages_per_xfer, map_page, NULL,
			       ftl_p2l_ckpt_issue_end, rq, &rq->md_persist_entry_ctx);
}
218 :
219 : #if defined(DEBUG)
/* Debug helper: assert that every checkpoint page in [page_begin, page_end)
 * has the expected written (val == true) or not-written state in the bitmap.
 */
static void
ftl_p2l_validate_pages(struct ftl_band *band, struct ftl_p2l_ckpt *ckpt,
		       uint64_t page_begin, uint64_t page_end, bool val)
{
	uint64_t page_no;

	for (page_no = page_begin; page_no < page_end; page_no++) {
		assert(ftl_bitmap_get(ckpt->bmp, page_no) == val);
	}
}
230 :
/* Debug-only consistency check for a band's checkpoint bitmap: every data
 * page must have been checkpointed, while the pages corresponding to the
 * band's tail metadata must not have been (tail md is not checkpointed).
 * No-op when the band holds no checkpoint.
 */
void
ftl_p2l_validate_ckpt(struct ftl_band *band)
{
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	uint64_t num_blks_tail_md = ftl_tail_md_num_blocks(band->dev);
	uint64_t num_pages_tail_md;

	if (!ckpt) {
		return;
	}

	num_pages_tail_md = num_blks_tail_md / band->dev->xfer_size * ckpt->pages_per_xfer;

	/* Tail md is assumed to occupy whole xfers */
	assert(num_blks_tail_md % band->dev->xfer_size == 0);

	/* all data pages written */
	ftl_p2l_validate_pages(band, ckpt,
			       0, ckpt->num_pages - num_pages_tail_md, true);

	/* tail md pages not written */
	ftl_p2l_validate_pages(band, ckpt, ckpt->num_pages - num_pages_tail_md,
			       ckpt->num_pages, false);
}
254 : #endif
255 :
256 : static struct ftl_band *
257 0 : ftl_get_band_from_region(struct spdk_ftl_dev *dev, enum ftl_layout_region_type type)
258 : {
259 0 : struct ftl_band *band = NULL;
260 0 : uint64_t i;
261 :
262 0 : assert(type >= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN);
263 0 : assert(type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX);
264 :
265 0 : for (i = 0; i < ftl_get_num_bands(dev); i++) {
266 0 : band = &dev->bands[i];
267 0 : if ((band->md->state == FTL_BAND_STATE_OPEN ||
268 0 : band->md->state == FTL_BAND_STATE_FULL) &&
269 0 : band->md->p2l_md_region == type) {
270 0 : return band;
271 : }
272 0 : }
273 :
274 0 : return NULL;
275 0 : }
276 :
277 : static void ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx);
278 :
/* Completion for one xfer worth of checkpoint pages persisted by
 * ftl_mngt_persist_band_p2l(). Advances to the next xfer of the same band;
 * once the band is fully persisted, moves on to the next md region by
 * re-running the management step. Fails the step on I/O error.
 */
static void
ftl_p2l_ckpt_persist_end(int status, void *arg)
{
	struct ftl_mngt_process *mngt = arg;
	struct ftl_p2l_sync_ctx *ctx;

	assert(mngt);

	if (status) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ctx = ftl_mngt_get_step_ctx(mngt);
	ctx->xfer_start++;

	if (ctx->xfer_start == ctx->xfer_end) {
		/* Band done - advance to the next P2L md region */
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
	} else {
		ftl_mngt_persist_band_p2l(mngt, ctx);
	}
}
302 :
/* Persist one xfer worth (pages_per_xfer pages) of the band's in-memory P2L
 * map into its checkpoint region, starting at ctx->xfer_start. Completion is
 * chained via ftl_p2l_ckpt_persist_end(), which advances the context.
 */
static void
ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx)
{
	struct ftl_band *band = ctx->band;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_p2l_map_entry *band_entries;
	struct ftl_p2l_ckpt *ckpt;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t cur_page;
	uint64_t lbas_synced = 0;

	ckpt = band->p2l_map.p2l_ckpt;

	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);

	/* Position at the first checkpoint page belonging to this xfer */
	map_page += ctx->xfer_start * ckpt->pages_per_xfer;

	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		/* The last page of an xfer may hold fewer than a full page of entries */
		uint64_t lbas_to_copy = spdk_min(FTL_NUM_P2L_ENTRIES_NO_VSS, dev->xfer_size - lbas_synced);

		band_entries = band->p2l_map.band_map + ctx->xfer_start * dev->xfer_size + lbas_synced;
		memcpy(page->map, band_entries, lbas_to_copy * sizeof(struct ftl_p2l_map_entry));

		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		/* Checksum always covers the full page map area, matching the restore path */
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
				FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
		page->metadata.p2l_ckpt.count = lbas_to_copy;
		lbas_synced += lbas_to_copy;
	}

	assert(lbas_synced == dev->xfer_size);
	/* Save the P2L map entry */
	ftl_md_persist_entries(ckpt->md, ctx->xfer_start * ckpt->pages_per_xfer, ckpt->pages_per_xfer,
			       map_page, NULL,
			       ftl_p2l_ckpt_persist_end, mngt, &band->md_persist_entry_ctx);
}
341 :
/* Management step, re-entered once per P2L md region (ctx->md_region): find
 * the band owning the region and persist its P2L map up to the band's current
 * write position. Advances to the next step once every region is handled.
 */
void
ftl_mngt_persist_bands_p2l(struct ftl_mngt_process *mngt)
{
	struct ftl_p2l_sync_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
	struct ftl_band *band;
	uint64_t band_offs, num_xfers;

	if (ctx->md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		/* All P2L regions synced */
		ftl_mngt_next_step(mngt);
		return;
	}

	band = ftl_get_band_from_region(ftl_mngt_get_dev(mngt), ctx->md_region);

	/* No band has the md region assigned (shutdown happened before next_band was assigned) */
	if (!band) {
		ctx->xfer_start = 0;
		ctx->xfer_end = 0;
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	/* Number of complete xfers written so far determines how much P2L to persist */
	band_offs = ftl_band_block_offset_from_addr(band, band->md->iter.addr);
	num_xfers = band_offs / band->dev->xfer_size;

	ctx->xfer_start = 0;
	ctx->xfer_end = num_xfers;
	ctx->band = band;

	/* Band wasn't written to - no need to sync its P2L */
	if (ctx->xfer_end == 0) {
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	ftl_mngt_persist_band_p2l(mngt, ctx);
}
381 :
382 : uint64_t
383 0 : ftl_mngt_p2l_ckpt_get_seq_id(struct spdk_ftl_dev *dev, int md_region)
384 : {
385 0 : struct ftl_layout *layout = &dev->layout;
386 0 : struct ftl_md *md = layout->md[md_region];
387 0 : struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
388 0 : uint64_t page_no, seq_id = 0;
389 :
390 0 : for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
391 0 : if (seq_id < page->metadata.p2l_ckpt.seq_id) {
392 0 : seq_id = page->metadata.p2l_ckpt.seq_id;
393 0 : }
394 0 : }
395 0 : return seq_id;
396 0 : }
397 :
/* Rebuild a band's in-memory P2L map from the checkpoint pages persisted for
 * (md_region, seq_id) after a dirty shutdown. Re-acquires the checkpoint
 * object for the band and positions the band write iterator just past the
 * last checkpointed xfer.
 *
 * Returns 0 on success, -EINVAL on region/sequence mismatch, checksum error,
 * or when no page carrying seq_id is found.
 */
int
ftl_mngt_p2l_ckpt_restore(struct ftl_band *band, uint32_t md_region, uint64_t seq_id)
{
	struct ftl_layout *layout = &band->dev->layout;
	struct ftl_md *md = layout->md[md_region];
	struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
	struct ftl_p2l_map_entry *band_entries;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t page_no, page_max = 0, xfer_count, lbas_synced;
	uint64_t pages_per_xfer = spdk_divide_round_up(dev->xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);
	bool page_found = false;

	assert(band->md->p2l_md_region == md_region);
	if (band->md->p2l_md_region != md_region) {
		return -EINVAL;
	}

	assert(band->md->seq == seq_id);
	if (band->md->seq != seq_id) {
		return -EINVAL;
	}

	/* Walk every page of the region; only pages stamped with this band's seq id belong to it */
	for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
		if (page->metadata.p2l_ckpt.seq_id != seq_id) {
			continue;
		}

		page_max = page_no;
		page_found = true;

		/* A checksum of 0 is treated as "not set" and skips verification */
		if (page->metadata.p2l_ckpt.p2l_checksum &&
		    page->metadata.p2l_ckpt.p2l_checksum != spdk_crc32c_update(page->map,
				    FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0)) {
			ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_MD_NV_CACHE);
			return -EINVAL;
		}

		/* Translate the page number back into an offset within the band's P2L map */
		xfer_count = page_no / pages_per_xfer;
		lbas_synced = (page_no % pages_per_xfer) * FTL_NUM_P2L_ENTRIES_NO_VSS;

		/* Restore the page from P2L checkpoint */
		band_entries = band->p2l_map.band_map + xfer_count * dev->xfer_size + lbas_synced;

		memcpy(band_entries, page->map, page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));
	}

	assert(page_found);
	if (!page_found) {
		return -EINVAL;
	}

	/* Restore check point in band P2L map */
	band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(
					 band->dev, md_region);

	/* Align page_max to xfer_size aligned pages */
	if ((page_max + 1) % pages_per_xfer != 0) {
		page_max += (pages_per_xfer - page_max % pages_per_xfer - 1);
	}
#ifdef DEBUG
	/* Set check point valid map for validation */
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	for (uint64_t i = 0; i <= page_max; i++) {
		ftl_bitmap_set(ckpt->bmp, i);
	}
#endif

	ftl_band_iter_init(band);
	/* Align page max to xfer size and set iter */
	ftl_band_iter_set(band, (page_max / band->p2l_map.p2l_ckpt->pages_per_xfer + 1) * dev->xfer_size);

	return 0;
}
471 :
472 : enum ftl_layout_region_type
473 0 : ftl_p2l_ckpt_region_type(const struct ftl_p2l_ckpt *ckpt) {
474 0 : return ckpt->layout_region->type;
475 : }
476 :
/* Move the free-list checkpoint bound to region_type onto the inuse list and
 * return it.
 *
 * NOTE(review): if no matching checkpoint is on the free list, TAILQ_FOREACH
 * leaves ckpt NULL; the assert compiles out under NDEBUG and TAILQ_REMOVE
 * would then dereference NULL. Callers must guarantee the region's
 * checkpoint is currently free - confirm with call sites.
 */
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire_region_type(struct spdk_ftl_dev *dev, uint32_t region_type)
{
	struct ftl_p2l_ckpt *ckpt = NULL;

	TAILQ_FOREACH(ckpt, &dev->p2l_ckpt.free, link) {
		if (ckpt->layout_region->type == region_type) {
			break;
		}
	}

	assert(ckpt);

	TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);

	return ckpt;
}
495 :
/* Restore a band's P2L map from its checkpoint after a clean shutdown.
 * Every page up to the band's write iterator must carry the band's sequence
 * id (asserted). Returns 0 on success, -EINVAL when the band's md region is
 * out of the P2L checkpoint range.
 */
int
ftl_mngt_p2l_ckpt_restore_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout *layout = &dev->layout;
	struct ftl_p2l_ckpt_page_no_vss *page;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;
	struct ftl_p2l_ckpt *ckpt;
	uint64_t page_no;
	uint64_t num_written_pages, lbas_synced;

	if (md_region < FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN ||
	    md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		return -EINVAL;
	}

	/* Clean shutdown always leaves the iterator on an xfer boundary */
	assert(band->md->iter.offset % dev->xfer_size == 0);

	/* Associate band with md region before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return 0;
	}

	ckpt = band->p2l_map.p2l_ckpt;
	num_written_pages = band->md->iter.offset / dev->xfer_size * ckpt->pages_per_xfer;

	page_no = 0;
	lbas_synced = 0;

	/* Restore P2L map up to last written page */
	page = ftl_md_get_buffer(layout->md[md_region]);


	for (; page_no < num_written_pages; page_no++, page++) {
		assert(page->metadata.p2l_ckpt.seq_id == band->md->seq);
		/* Restore the page from P2L checkpoint */
		memcpy(band->p2l_map.band_map + lbas_synced, page->map,
		       page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));

		lbas_synced += page->metadata.p2l_ckpt.count;

#if defined(DEBUG)
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
#endif
	}

	assert(lbas_synced % dev->xfer_size == 0);

	/* NOTE(review): this reads the page just past the restored range; it
	 * assumes the region has at least one page beyond num_written_pages
	 * even when the band is fully written - TODO confirm layout sizing.
	 */
	assert(page->metadata.p2l_ckpt.seq_id < band->md->seq);

	return 0;
}
554 :
/* Shared-memory fast-start variant of ftl_mngt_p2l_ckpt_restore_clean():
 * the band's P2L map is already present (presumably restored via SHM), so
 * only re-associate the band with its checkpoint object. Debug builds
 * additionally mark the already-written pages in the validation bitmap.
 */
void
ftl_mngt_p2l_ckpt_restore_shm_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;

	/* Associate band with md region before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

#if defined(DEBUG)
	uint64_t page_no;
	uint64_t num_written_pages;

	assert(band->md->iter.offset % dev->xfer_size == 0);
	num_written_pages = band->md->iter.offset / dev->xfer_size * band->p2l_map.p2l_ckpt->pages_per_xfer;

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return;
	}

	/* Set page number to first data page - skip head md */
	page_no = 0;

	for (; page_no < num_written_pages; page_no++) {
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
	}
#endif
}
|