Lines matching refs:pblk in pblk-read.c (each entry: source line number, then the matching line)

16  * pblk-read.c - pblk's read path
19 #include "pblk.h"
28 static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
37 return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
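
A rough userspace sketch of the pattern behind pblk_read_from_cache() at lines 28-37: data for an LBA may still sit in the write buffer, and the copy can fail if the entry was written back between the L2P lookup and the copy, so the helper reports success or failure and the caller retries. All names below are hypothetical stand-ins, not pblk's API.

    #include <stdbool.h>
    #include <string.h>

    #define SEC_SIZE 4096

    struct cache_entry {
        unsigned long lba;  /* logical address the entry holds */
        bool valid;         /* cleared once flushed to media */
        char data[SEC_SIZE];
    };

    /* Returns true on success, false if the entry no longer holds @lba. */
    static bool read_from_cache(struct cache_entry *e, unsigned long lba,
                                char *dst)
    {
        if (!e->valid || e->lba != lba)
            return false;
        memcpy(dst, e->data, SEC_SIZE);
        return true;
    }
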
40 static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
48 nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
55 struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
68 if (!pblk_read_from_cache(pblk, bio, lba,
91 atomic_long_inc(&pblk->cache_reads);
98 if (pblk_io_aligned(pblk, nr_secs))
102 atomic_long_add(nr_secs, &pblk->inflight_reads);
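
pblk_lookup_l2p_seq() at line 48 resolves a whole batch of LBAs in one call. A hedged sketch of one way such a sequential lookup can work: return a contiguous run of mappings that are uniformly cached or uniformly on media, so the caller handles the whole run one way. l2p_get() and the types are illustrative, not pblk's real API.

    #include <stdbool.h>
    #include <stddef.h>

    struct ppa { unsigned long addr; bool cached; };

    extern struct ppa l2p_get(unsigned long lba);

    /* Resolve up to @nr_secs mappings starting at @blba, stopping at
     * the first cached/on-media state change; assumes nr_secs >= 1. */
    static size_t lookup_l2p_seq(unsigned long blba, size_t nr_secs,
                                 struct ppa *out, bool *from_cache)
    {
        size_t i;

        out[0] = l2p_get(blba);
        *from_cache = out[0].cached;

        for (i = 1; i < nr_secs; i++) {
            out[i] = l2p_get(blba + i);
            if (out[i].cached != *from_cache)
                break;  /* end the uniform run here */
        }
        return i;   /* number of sectors resolved */
    }
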
109 static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
116 if (!pblk_is_oob_meta_supported(pblk))
120 struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
130 print_ppa(pblk, &ppa_list[i], "seq", i);
132 pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
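
Lines 109-132 verify a completed sequential read: each sector's out-of-band metadata carries the LBA it was written for, and a read starting at blba expects blba, blba+1, and so on. A minimal userspace sketch of that check; ADDR_EMPTY and the flat array layout are assumptions for illustration.

    #include <stdio.h>

    #define ADDR_EMPTY ((unsigned long)-1)

    static void read_check_seq(const unsigned long *meta_lba,
                               unsigned long blba, size_t nr)
    {
        for (size_t i = 0; i < nr; i++) {
            if (meta_lba[i] == ADDR_EMPTY)  /* padded/empty sector */
                continue;
            if (meta_lba[i] != blba + i)    /* misdirected data */
                fprintf(stderr, "corrupted read LBA (%lu/%lu)\n",
                        meta_lba[i], blba + i);
        }
    }
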
142 static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
148 if (!pblk_is_oob_meta_supported(pblk))
152 struct pblk_sec_meta *meta = pblk_get_meta(pblk,
166 print_ppa(pblk, &ppa_list[j], "rnd", j);
168 pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
176 WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
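
The random-read variant at lines 142-176 does the same check against a caller-supplied LBA list, skipping invalidated entries, and warns if the number of valid entries does not match what the request actually carried (line 176). A sketch under the same assumptions as above:

    #include <assert.h>
    #include <stdio.h>

    #define ADDR_EMPTY ((unsigned long)-1)

    static void read_check_rand(const unsigned long *meta_lba,
                                const unsigned long *lba_list,
                                size_t nr_lbas, size_t nr_ppas)
    {
        size_t j = 0;

        for (size_t i = 0; i < nr_lbas; i++) {
            if (lba_list[i] == ADDR_EMPTY)
                continue;   /* sector invalidated after selection */
            if (meta_lba[j] != lba_list[i])
                fprintf(stderr, "corrupted read LBA (%lu/%lu)\n",
                        meta_lba[j], lba_list[i]);
            j++;
        }
        assert(j == nr_ppas);   /* mirrors the WARN_ONCE at line 176 */
    }
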
187 static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
197 pblk_log_read_err(pblk, rqd);
199 pblk_read_check_seq(pblk, rqd, r_ctx->lba);
203 pblk_rq_to_line_put(pblk, rqd);
206 atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
207 atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
210 pblk_free_rqd(pblk, rqd, PBLK_READ);
211 atomic_dec(&pblk->inflight_io);
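
Lines 187-211 are the common completion path: log errors, release line references, and move the submit-time counters to the done side. A sketch of the counter handling with C11 atomics; the struct and field names are illustrative.

    #include <stdatomic.h>

    struct read_stats {
        atomic_long sync_reads;     /* completed sectors */
        atomic_long inflight_reads; /* sectors still in flight */
        atomic_int  inflight_io;    /* whole requests in flight */
    };

    /* Mirrors the updates at lines 206-211: sectors move from the
     * in-flight count to the completed count, then the request
     * itself is retired. */
    static void end_io_account(struct read_stats *s, long nr_secs)
    {
        atomic_fetch_add(&s->sync_reads, nr_secs);
        atomic_fetch_sub(&s->inflight_reads, nr_secs);
        atomic_fetch_sub(&s->inflight_io, 1);
    }
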
216 struct pblk *pblk = rqd->private;
221 __pblk_end_io_read(pblk, rqd, true);
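
Lines 216-221 show the usual completion-callback pattern: the driver instance is stashed in the request's private pointer at submit time (line 279) and recovered in the callback. A minimal illustration with stand-in types:

    struct request_ctx {
        void *private;  /* set to the owning driver instance at submit */
    };

    struct my_driver { int dummy; };    /* stand-in for struct pblk */

    static void my_end_io(struct request_ctx *rqd)
    {
        struct my_driver *drv = rqd->private;
        (void)drv;  /* ...finish the read using the driver context... */
    }
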
224 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
227 struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
230 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
233 atomic_long_inc(&pblk->inflight_reads);
248 if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
249 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
256 atomic_long_inc(&pblk->cache_reads);
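
Lines 248-249 are a retry: between the L2P lookup and the buffer copy, the cached entry can be flushed to media, so a failed copy re-resolves the mapping and tries again. A sketch of that loop with hypothetical helper names:

    #include <stdbool.h>

    struct ppa { unsigned long addr; bool cached; };

    extern struct ppa lookup_l2p(unsigned long lba);
    extern bool read_from_cache(unsigned long lba, struct ppa p, void *dst);
    extern void read_from_media(struct ppa p, void *dst);

    static void read_one(unsigned long lba, void *dst)
    {
        struct ppa ppa = lookup_l2p(lba);

        while (ppa.cached) {
            if (read_from_cache(lba, ppa, dst))
                return;     /* still held in the write buffer */
            /* entry was flushed; the mapping now points elsewhere */
            ppa = lookup_l2p(lba);
        }
        read_from_media(ppa, dst);
    }
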
263 void pblk_submit_read(struct pblk *pblk, struct bio *bio)
275 rqd = pblk_alloc_rqd(pblk, PBLK_READ);
279 rqd->private = pblk;
286 if (pblk_alloc_rqd_meta(pblk, rqd)) {
288 pblk_free_rqd(pblk, rqd, PBLK_READ);
299 nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
302 pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
311 atomic_inc(&pblk->inflight_io);
312 __pblk_end_io_read(pblk, rqd, false);
342 } else if (pblk_submit_io(pblk, rqd, NULL)) {
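
pblk_submit_read() at lines 263-342 is the entry point: allocate the request and its metadata, resolve the sectors, complete inline when the cache satisfied everything (lines 311-312), and otherwise hand the request to the device. A hedged sketch of that overall shape, not pblk's actual control flow:

    #include <stdbool.h>
    #include <stdlib.h>

    struct rq { size_t nr_secs; };

    extern void resolve_and_copy(struct rq *rqd, bool *all_cached);
    extern int submit_to_device(struct rq *rqd);
    extern void end_io(struct rq *rqd);

    static int submit_read(size_t nr_secs)
    {
        struct rq *rqd = calloc(1, sizeof(*rqd));
        bool all_cached;

        if (!rqd)
            return -1;
        rqd->nr_secs = nr_secs;

        resolve_and_copy(rqd, &all_cached);
        if (all_cached) {
            end_io(rqd);    /* no media access needed */
            return 0;
        }
        if (submit_to_device(rqd)) {
            free(rqd);
            return -1;
        }
        return 0;   /* completion will free rqd asynchronously */
    }
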
349 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
358 pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
364 ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
374 atomic_long_add(valid_secs, &pblk->inflight_reads);
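
Lines 349-374 build a GC read: an LBA chosen for relocation is only still worth reading if the L2P table maps it to the expected address inside the victim line (line 364); anything overwritten in the meantime is skipped. A sketch of that filter with assumed types and helpers:

    #include <stdbool.h>
    #include <stddef.h>

    struct ppa { unsigned long line_id; unsigned long paddr; };

    extern struct ppa l2p_get(unsigned long lba);

    static bool ppa_eq(struct ppa a, struct ppa b)
    {
        return a.line_id == b.line_id && a.paddr == b.paddr;
    }

    static size_t filter_valid(const unsigned long *lba_list,
                               const unsigned long *paddr_list, size_t nr,
                               unsigned long line_id, struct ppa *out)
    {
        size_t valid = 0;

        for (size_t i = 0; i < nr; i++) {
            struct ppa cur = l2p_get(lba_list[i]);
            struct ppa gc = { line_id, paddr_list[i] };

            if (ppa_eq(cur, gc))    /* still lives in the victim line */
                out[valid++] = cur;
        }
        return valid;
    }
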
380 static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
391 if (lba >= pblk->capacity) {
392 WARN(1, "pblk: read lba out of bounds\n");
396 spin_lock(&pblk->trans_lock);
397 ppa_l2p = pblk_trans_map_get(pblk, lba);
398 spin_unlock(&pblk->trans_lock);
400 ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
408 atomic_long_inc(&pblk->inflight_reads);
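
The single-sector GC lookup at lines 380-408 bounds-checks the LBA against the device capacity (line 391) and reads the mapping under the translation lock (lines 396-398) so it cannot change mid-comparison. A userspace analogue with a pthread mutex; the table layout is an assumption:

    #include <pthread.h>
    #include <stdbool.h>

    struct l2p_table {
        pthread_mutex_t lock;
        unsigned long capacity;
        unsigned long *map;     /* lba -> physical address */
    };

    static bool gc_lookup(struct l2p_table *t, unsigned long lba,
                          unsigned long victim_paddr)
    {
        unsigned long cur;

        if (lba >= t->capacity)
            return false;   /* out of bounds: nothing to relocate */

        pthread_mutex_lock(&t->lock);
        cur = t->map[lba];
        pthread_mutex_unlock(&t->lock);

        /* only relocate if the LBA still maps to the victim sector */
        return cur == victim_paddr;
    }
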
415 int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
422 ret = pblk_alloc_rqd_meta(pblk, &rqd);
427 gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
434 gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
445 if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
450 pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
452 atomic_dec(&pblk->inflight_io);
455 atomic_long_inc(&pblk->read_failed_gc);
457 pblk_print_failed_rqd(pblk, &rqd, rqd.error);
462 atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
463 atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
464 atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
468 pblk_free_rqd_meta(pblk, &rqd);
472 pblk_free_rqd_meta(pblk, &rqd);
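
pblk_submit_read_gc() at lines 415-472 reads synchronously and funnels every failure after the metadata allocation through cleanup that frees it (the pblk_free_rqd_meta() calls at lines 468 and 472). A sketch of that staged-cleanup shape with goto, using hypothetical helpers:

    #include <stdio.h>
    #include <stdlib.h>

    extern int alloc_meta(void **meta);     /* assumed to malloc */
    extern int submit_io_sync(void *meta);

    static int submit_read_gc(void)
    {
        void *meta;
        int ret;

        ret = alloc_meta(&meta);
        if (ret)
            return ret;     /* nothing to unwind yet */

        ret = submit_io_sync(meta);
        if (ret) {
            fprintf(stderr, "gc read failed\n");
            goto err_free;
        }

        /* ...verify metadata, update counters... */
        free(meta);
        return 0;

    err_free:
        free(meta);
        return ret;
    }
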