// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order
 * to guarantee ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}

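/*
 * Resolve a multi-sector read against the L2P table. When the mapping points
 * into the write buffer, sectors are copied into the bio one by one; a race
 * with the write path can cut the run short, in which case only the sectors
 * handled so far are reported. Returns the number of sectors consumed, which
 * the caller compares against rqd->nr_ppas to decide whether the bio must be
 * split.
 */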
static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				struct bio *bio, sector_t blba,
				bool *from_cache)
{
	void *meta_list = rqd->meta_list;
	int nr_secs, i;

retry:
	nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
					from_cache);

	if (!*from_cache)
		goto end;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

		if (pblk_ppa_empty(rqd->ppa_list[i])) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta->lba = addr_empty;
		} else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
			/*
			 * Try to read from write buffer. The address is later
			 * checked on the write buffer to prevent retrieving
			 * overwritten data.
			 */
			if (!pblk_read_from_cache(pblk, bio, lba,
						  rqd->ppa_list[i])) {
				if (i == 0) {
					/*
					 * We have not called bio_advance()
					 * yet, so we can simply retry.
					 */
					goto retry;
				} else {
					/*
					 * We have already called
					 * bio_advance(), so we cannot retry;
					 * return what we have so far and let
					 * the caller split the bio at the
					 * current sector position.
					 */
					nr_secs = i;
					goto end;
				}
			}
			meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		}
		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

end:
	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif

	return nr_secs;
}
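/*
 * Sanity check a completed sequential read: the lba stored in the OOB
 * metadata of each sector must match the lba the sector was read for.
 * A mismatch indicates a corrupted read and only triggers a warning.
 */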
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
				 lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
				 meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

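/*
 * Complete the original user bio. A high-ECC warning (NVM_RSP_WARN_HIGHECC)
 * means the data was still read successfully, so it is not treated as a
 * failure.
 */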
static void pblk_end_user_read(struct bio *bio, int error)
{
	if (error && error != NVM_RSP_WARN_HIGHECC)
		bio_io_error(bio);
	else
		bio_endio(bio);
}

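/*
 * Common read completion: account the I/O, log any device error, verify the
 * OOB lba metadata, drop the internal bio and, if requested, the line
 * references taken at submission time, and finally free the request.
 */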
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);
	bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio, rqd->error);
	__pblk_end_io_read(pblk, rqd, true);
}

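/*
 * Single-sector counterpart of pblk_read_ppalist_rq. A failed cache read is
 * simply retried after a fresh L2P lookup, since no data has been copied
 * into the bio yet.
 */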
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, bool *from_cache)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
			goto retry;
		}

		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

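/*
 * Entry point for user reads. The original bio is cloned so that cache reads
 * can advance it and read errors can be handled internally. Reads served
 * entirely from the write buffer complete immediately; partially cached
 * reads are split via the block layer and retried chunk by chunk; everything
 * else is submitted to the drive.
 */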
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	bool from_cache;
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	struct bio *int_bio, *split_bio;

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;

	if (pblk_alloc_rqd_meta(pblk, rqd)) {
		bio_io_error(bio);
		pblk_free_rqd(pblk, rqd, PBLK_READ);
		return;
	}

	/* Clone read bio to deal internally with:
	 * -read errors when reading from drive
	 * -bio_advance() calls during cache reads
	 */
	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

	if (nr_secs > 1)
		nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
						&from_cache);
	else
		pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);

split_retry:
	r_ctx->private = bio; /* original bio */
	rqd->bio = int_bio; /* internal bio */

	if (from_cache && nr_secs == rqd->nr_ppas) {
		/* All data was read from cache, we can complete the IO. */
		pblk_end_user_read(bio, 0);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
	} else if (nr_secs != rqd->nr_ppas) {
		/* The read bio could be partially filled by the write buffer,
		 * but there are some holes that need to be read from the
		 * drive. To handle this, use the block layer mechanism to
		 * split this request into smaller ones and chain them.
		 */
		split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
				      &pblk_bio_set);
		bio_chain(split_bio, bio);
		generic_make_request(bio);

		/* The new bio contains the first N sectors of the previous
		 * one, so we can continue to use the existing rqd, but we
		 * need to shrink the number of PPAs in it. The new bio is
		 * also guaranteed to contain only data from cache or only
		 * data from drive, never a mix of the two.
		 */
		bio = split_bio;
		rqd->nr_ppas = nr_secs;
		if (rqd->nr_ppas == 1)
			rqd->ppa_addr = rqd->ppa_list[0];

		/* Recreate int_bio - the existing one might already have had
		 * some internal fields modified.
		 */
		bio_put(int_bio);
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		goto split_retry;
	} else if (pblk_submit_io(pblk, rqd)) {
		/* Submitting I/O to the drive failed, report an error */
		rqd->error = -ENODEV;
		pblk_end_io_read(rqd);
	}
}

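/*
 * Build the ppa list for a multi-sector GC read. Sectors whose L2P mapping
 * no longer points at the victim line have been overwritten in the meantime
 * and are dropped from both the lba and paddr lists. Returns the number of
 * sectors still worth moving.
 */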
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

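/*
 * Single-sector counterpart of read_ppalist_rq_gc: validate that the lba
 * still lives on the victim line and, if so, set up the ppa for the read.
 * Returns 1 when the sector is still valid, 0 otherwise.
 */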
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->capacity) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

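/*
 * Synchronous read path used by the garbage collector. Only sectors whose
 * mapping still points at the victim line are read; the data lands in the
 * caller-supplied gc_rq->data buffer.
 */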
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
						gc_rq->lba_list[0],
						gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
				PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
			 PTR_ERR(bio));
		ret = PTR_ERR(bio);
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}