/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "ftl_l2p.h"
#include "ftl_band.h"
#include "ftl_nv_cache.h"
#include "ftl_l2p_cache.h"
#include "ftl_l2p_flat.h"

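/*
 * The L2P backend (flat table vs. paged cache) is selected at compile time:
 * FTL_L2P_OP(name) expands to ftl_l2p_flat_<name> when SPDK_FTL_L2P_FLAT is
 * defined and to ftl_l2p_cache_<name> otherwise. The ftl_l2p_* functions in
 * this file are thin wrappers forwarding to the selected implementation.
 */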
/* TODO: Verify why function pointers had worse performance than compile time constants */
#ifdef SPDK_FTL_L2P_FLAT
#define FTL_L2P_OP(name)	ftl_l2p_flat_ ## name
#else
#define FTL_L2P_OP(name)	ftl_l2p_cache_ ## name
#endif


int
ftl_l2p_init(struct spdk_ftl_dev *dev)
{
	TAILQ_INIT(&dev->l2p_deferred_pins);
	return FTL_L2P_OP(init)(dev);
}

void
ftl_l2p_deinit(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(deinit)(dev);
}

static inline void
ftl_l2p_pin_ctx_init(struct ftl_l2p_pin_ctx *pin_ctx, uint64_t lba, uint64_t count,
		     ftl_l2p_pin_cb cb, void *cb_ctx)
{
	pin_ctx->lba = lba;
	pin_ctx->count = count;
	pin_ctx->cb = cb;
	pin_ctx->cb_ctx = cb_ctx;
}

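/*
 * Pin the L2P pages covering [lba, lba + count) and invoke cb once the range
 * can be accessed; pin_ctx is owned by the caller until the matching
 * ftl_l2p_unpin() for the same range. Illustrative usage sketch only (the
 * request fields and callback name are hypothetical, not part of this API):
 *
 *	ftl_l2p_pin(dev, io->lba, io->num_blocks, pin_done_cb, io, &io->l2p_pin_ctx);
 */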
void
ftl_l2p_pin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count, ftl_l2p_pin_cb cb, void *cb_ctx,
	    struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, lba, count, cb, cb_ctx);
	FTL_L2P_OP(pin)(dev, pin_ctx);
}

void
ftl_l2p_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
{
	FTL_L2P_OP(unpin)(dev, lba, count);
}

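/*
 * "Pin" variant that doesn't pin anything: the context is initialized with an
 * invalid LBA and zero count, and the callback is completed immediately with
 * status 0, so callers that have nothing to pin can share one code path.
 */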
void
ftl_l2p_pin_skip(struct spdk_ftl_dev *dev, ftl_l2p_pin_cb cb, void *cb_ctx,
		 struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, FTL_LBA_INVALID, 0, cb, cb_ctx);
	cb(dev, 0, pin_ctx);
}

void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	FTL_L2P_OP(set)(dev, lba, addr);
}

ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	return FTL_L2P_OP(get)(dev, lba);
}

void
ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(clear)(dev, cb, cb_ctx);
}

void
ftl_l2p_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(restore)(dev, cb, cb_ctx);
}

void
ftl_l2p_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(persist)(dev, cb, cb_ctx);
}

void
ftl_l2p_trim(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(trim)(dev, cb, cb_ctx);
}

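/*
 * Retry at most one pin that was previously deferred with -EAGAIN, then let
 * the backend make progress on its own pending work.
 */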
void
ftl_l2p_process(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_pin_ctx *pin_ctx;

	pin_ctx = TAILQ_FIRST(&dev->l2p_deferred_pins);
	if (pin_ctx) {
		TAILQ_REMOVE(&dev->l2p_deferred_pins, pin_ctx, link);
		FTL_L2P_OP(pin)(dev, pin_ctx);
	}

	FTL_L2P_OP(process)(dev);
}

bool
ftl_l2p_is_halted(struct spdk_ftl_dev *dev)
{
	if (!TAILQ_EMPTY(&dev->l2p_deferred_pins)) {
		return false;
	}

	return FTL_L2P_OP(is_halted)(dev);
}

void
ftl_l2p_resume(struct spdk_ftl_dev *dev)
{
	return FTL_L2P_OP(resume)(dev);
}

void
ftl_l2p_halt(struct spdk_ftl_dev *dev)
{
	return FTL_L2P_OP(halt)(dev);
}

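/*
 * Look up the sequence ID stored in the trim metadata for the L2P page
 * containing the given LBA.
 */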
static uint64_t
get_trim_seq_id(struct spdk_ftl_dev *dev, uint64_t lba)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
	uint64_t *page = ftl_md_get_buffer(md);
	uint64_t page_no = lba / dev->layout.l2p.lbas_in_page;

	return page[page_no];
}

void
ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	struct ftl_nv_cache_chunk *current_chunk, *new_chunk;
	ftl_addr current_addr;
	/* Update the L2P for data placed on the cache device - used for user writes.
	 * This is split off from the base-device update because of extra edge cases around dirty shutdown,
	 * namely keeping two simultaneous writes to the same LBA consistent before and after shutdown. On the
	 * base device the stale L2P update can simply be ignored; here the address with the more advanced
	 * write pointer must be kept.
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr != FTL_ADDR_INVALID) {

		/* Check if write-after-write happened (two simultaneous user writes to the same LBA) */
		if (spdk_unlikely(current_addr != old_addr
				  && ftl_addr_in_nvc(dev, current_addr))) {

			current_chunk = ftl_nv_cache_get_chunk_from_addr(dev, current_addr);
			new_chunk = ftl_nv_cache_get_chunk_from_addr(dev, new_addr);

			/* To keep data consistent after dirty shutdown recovery, skip the older block: if both
			 * writes landed in the same chunk, the higher address 'wins'; if they landed in
			 * different chunks, the chunk with the higher seq_id 'wins'.
			 */
			if (current_chunk == new_chunk) {
				if (new_addr < current_addr) {
					return;
				}
			} else {
				if (new_chunk->md->seq_id < current_chunk->md->seq_id) {
					return;
				}
			}
		}

		/* For the shared-memory (SHM) recovery case the valid maps need to be set before the L2P is
		 * updated and the old address invalidated after it.
		 */

		/* DO NOT CHANGE ORDER - START */
		ftl_nv_cache_set_addr(dev, lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		ftl_invalidate_addr(dev, current_addr);
		/* DO NOT CHANGE ORDER - END */
		return;
	} else {
		uint64_t trim_seq_id = get_trim_seq_id(dev, lba);
		uint64_t new_seq_id = ftl_nv_cache_get_chunk_from_addr(dev, new_addr)->md->seq_id;

		/* Check whether the region was trimmed while this IO was in flight */
		if (new_seq_id < trim_seq_id) {
			return;
		}
	}

	/* The current address holds no value (i.e. it was never set, or it was trimmed), so the L2P can simply be set */
	/* DO NOT CHANGE ORDER - START (need to set P2L maps/valid map first) */
	ftl_nv_cache_set_addr(dev, lba, new_addr);
	ftl_l2p_set(dev, lba, new_addr);
	/* DO NOT CHANGE ORDER - END */
}

void
ftl_l2p_update_base(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	ftl_addr current_addr;

	/* Update the L2P for data placed on the base device - used by compaction and GC; the update may be
	 * invalidated by a concurrent user write. This is split off from the cache-device update because of
	 * the extra dirty-shutdown edge cases on the cache side, and because the assumptions differ: an
	 * INVALID address is never assigned for the base device, since trims are handled on the cache.
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(old_addr != FTL_ADDR_INVALID);
	assert(!ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr == old_addr) {
		/* DO NOT CHANGE ORDER - START (the L2P and new valid bits must be set before the old ones are
		 * invalidated, because of dirty shutdown recovery from SHM - having too many bits set is fine,
		 * having too many cleared is not) */
		ftl_band_set_addr(ftl_band_from_addr(dev, new_addr), lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		/* DO NOT CHANGE ORDER - END */
	} else {
		/* The new address could have been set by a running P2L checkpoint, but in the window between
		 * checkpoint completion and this L2P update new data could have been written to the open
		 * chunk, so the address needs to be invalidated.
		 */
		ftl_invalidate_addr(dev, new_addr);
	}

	ftl_invalidate_addr(dev, old_addr);
}

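/*
 * Complete a pin request on behalf of the backend: -EAGAIN means the pin could
 * not be satisfied right now, so it is queued on l2p_deferred_pins and retried
 * from ftl_l2p_process(); any other status is passed straight to the caller's
 * callback.
 */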
void
ftl_l2p_pin_complete(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	if (spdk_unlikely(status == -EAGAIN)) {
		TAILQ_INSERT_TAIL(&dev->l2p_deferred_pins, pin_ctx, link);
	} else {
		pin_ctx->cb(dev, status, pin_ctx);
	}
}