Lines matching refs:rqd (uses of the struct nvm_rq request descriptor rqd in the pblk read path)
40 static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
44 void *meta_list = rqd->meta_list;
48 nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
58 if (pblk_ppa_empty(rqd->ppa_list[i])) {
62 } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
69 rqd->ppa_list[i])) {
99 rqd->is_seq = 1;
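
The matches at lines 40-99 above come from pblk_read_ppalist_rq(), which resolves the request's starting LBA through the L2P table and then classifies every resolved address as empty (unmapped), cached in the write buffer, or resident on the device; line 99 additionally flags the request through rqd->is_seq. Below is a minimal user-space sketch of that classification loop under simplified assumptions: the enum, mini_ppa type and classify_sectors() name are hypothetical stand-ins, not the kernel's struct nvm_rq / struct ppa_addr API.

/* Illustrative stand-in only; the kernel walks rqd->ppa_list and calls
 * pblk_ppa_empty() / pblk_addr_in_cache() to decide how each sector is read. */
#include <stdio.h>

enum ppa_kind { PPA_EMPTY, PPA_CACHE, PPA_DEVICE };   /* hypothetical */

struct mini_ppa { enum ppa_kind kind; };              /* stand-in for ppa_addr */

/* Decide per sector whether to zero-fill, copy from the write buffer, or
 * leave it for the media read, mirroring the branches at lines 58-69 above. */
static int classify_sectors(const struct mini_ppa *ppa_list, int nr_ppas)
{
        int device_secs = 0;

        for (int i = 0; i < nr_ppas; i++) {
                switch (ppa_list[i].kind) {
                case PPA_EMPTY:           /* unmapped: return zeroes        */
                        printf("sec %d: zero-fill\n", i);
                        break;
                case PPA_CACHE:           /* still in the ring write buffer */
                        printf("sec %d: copy from cache\n", i);
                        break;
                case PPA_DEVICE:          /* needs a real media read        */
                        printf("sec %d: read from device\n", i);
                        device_secs++;
                        break;
                }
        }
        return device_secs;               /* sectors the device must serve  */
}

int main(void)
{
        struct mini_ppa ppas[] = { {PPA_DEVICE}, {PPA_CACHE}, {PPA_EMPTY} };

        printf("device sectors: %d\n", classify_sectors(ppas, 3));
        return 0;
}
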
109 static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
112 void *meta_list = rqd->meta_list;
113 int nr_lbas = rqd->nr_ppas;
128 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
142 static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
145 void *meta_lba_list = rqd->meta_list;
164 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
176 WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
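
pblk_read_check_seq() and pblk_read_check_rand() (lines 109-176) compare the LBAs carried back in rqd->meta_list against what the request expected: consecutive LBAs starting at the request's base in the sequential case, and the caller-supplied lba_list in the random (GC) case, warning when a value or count disagrees (line 176). A simplified sketch of the sequential variant, assuming a plain uint64_t array in place of pblk_get_meta() and the little-endian handling; check_seq() here is a hypothetical stand-in, not the kernel function.

/* Stand-in for the sequential metadata check: every mapped sector's stored
 * LBA must equal start_lba + i; unmapped sectors carry an empty marker. */
#include <stdint.h>
#include <stdio.h>

#define ADDR_EMPTY (~0ULL)        /* unmapped-sector marker, as pblk uses */

static int check_seq(const uint64_t *meta_lba, int nr_lbas, uint64_t start_lba)
{
        int mismatches = 0;

        for (int i = 0; i < nr_lbas; i++) {
                if (meta_lba[i] == ADDR_EMPTY)   /* never mapped: skip it */
                        continue;
                if (meta_lba[i] != start_lba + i) {
                        fprintf(stderr, "lba mismatch at sec %d: %llu != %llu\n",
                                i, (unsigned long long)meta_lba[i],
                                (unsigned long long)(start_lba + i));
                        mismatches++;
                }
        }
        return mismatches;
}

int main(void)
{
        uint64_t meta[] = { 100, ADDR_EMPTY, 102, 103 };

        return check_seq(meta, 4, 100) ? 1 : 0;
}
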
187 static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
190 struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
191 struct bio *int_bio = rqd->bio;
196 if (rqd->error)
197 pblk_log_read_err(pblk, rqd);
199 pblk_read_check_seq(pblk, rqd, r_ctx->lba);
203 pblk_rq_to_line_put(pblk, rqd);
206 atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
207 atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
210 pblk_free_rqd(pblk, rqd, PBLK_READ);
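
__pblk_end_io_read() (lines 187-210) is the common completion path: it logs any device error (196-197), re-runs the sequential metadata check (199), drops the line references held by the request (203), moves the request's sector count from inflight_reads to sync_reads (206-207), and frees the rqd (210). A small sketch of that counter handover, with C11 atomics standing in for the kernel's atomic_long_t and a hypothetical read_stats struct in place of struct pblk.

/* Stand-in for the accounting at lines 206-207: a completed read moves its
 * sector count from the in-flight counter to the synced counter. */
#include <stdatomic.h>
#include <stdio.h>

struct read_stats {                    /* simplified stand-in for struct pblk */
        atomic_long inflight_reads;
        atomic_long sync_reads;
};

static void account_completion(struct read_stats *s, long nr_ppas)
{
        atomic_fetch_add(&s->sync_reads, nr_ppas);
        atomic_fetch_sub(&s->inflight_reads, nr_ppas);
}

int main(void)
{
        struct read_stats s = { 0 };

        atomic_fetch_add(&s.inflight_reads, 4);   /* submission side */
        account_completion(&s, 4);                /* completion side */
        printf("inflight=%ld synced=%ld\n",
               atomic_load(&s.inflight_reads), atomic_load(&s.sync_reads));
        return 0;
}
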
214 static void pblk_end_io_read(struct nvm_rq *rqd)
216 struct pblk *pblk = rqd->private;
217 struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
220 pblk_end_user_read(bio, rqd->error);
221 __pblk_end_io_read(pblk, rqd, true);
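
pblk_end_io_read() (lines 214-221) is the asynchronous end_io callback wired up at submission time (lines 279-280): the request carries an opaque private pointer back to the owning pblk instance (line 216), the user bio is completed with rqd->error (line 220), and the shared teardown runs afterwards (line 221). The sketch below shows that private-pointer callback pattern with simplified stand-in types (mini_rq, mini_pblk and end_io_read here are hypothetical, not the kernel structures).

/* Stand-in for the end_io wiring: the request stores a completion callback
 * plus an opaque private pointer, and the callback recovers its context. */
#include <stdio.h>

struct mini_rq {                         /* simplified stand-in for nvm_rq */
        void (*end_io)(struct mini_rq *rqd);
        void *private;                   /* points back to the owning pblk */
        int error;
};

struct mini_pblk { const char *name; };  /* simplified stand-in for pblk   */

/* Mirrors the shape of pblk_end_io_read(): fetch the instance from
 * rqd->private, finish the user I/O, then run the internal teardown. */
static void end_io_read(struct mini_rq *rqd)
{
        struct mini_pblk *pblk = rqd->private;

        printf("%s: user bio completed, error=%d\n", pblk->name, rqd->error);
        /* the __pblk_end_io_read() equivalent would release resources here */
}

int main(void)
{
        struct mini_pblk pblk = { "pblk0" };
        struct mini_rq rqd = { .end_io = end_io_read, .private = &pblk };

        rqd.end_io(&rqd);    /* the device driver calls this on completion */
        return 0;
}
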
224 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
227 struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
259 rqd->ppa_addr = ppa;
269 struct nvm_rq *rqd;
275 rqd = pblk_alloc_rqd(pblk, PBLK_READ);
277 rqd->opcode = NVM_OP_PREAD;
278 rqd->nr_ppas = nr_secs;
279 rqd->private = pblk;
280 rqd->end_io = pblk_end_io_read;
282 r_ctx = nvm_rq_to_pdu(rqd);
286 if (pblk_alloc_rqd_meta(pblk, rqd)) {
288 pblk_free_rqd(pblk, rqd, PBLK_READ);
299 nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
302 pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
306 rqd->bio = int_bio; /* internal bio */
308 if (from_cache && nr_secs == rqd->nr_ppas) {
312 __pblk_end_io_read(pblk, rqd, false);
313 } else if (nr_secs != rqd->nr_ppas) {
326 * we can continue to use existing rqd, but we need to shrink
332 rqd->nr_ppas = nr_secs;
333 if (rqd->nr_ppas == 1)
334 rqd->ppa_addr = rqd->ppa_list[0];
342 } else if (pblk_submit_io(pblk, rqd, NULL)) {
344 rqd->error = -ENODEV;
345 pblk_end_io_read(rqd);
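
Lines 269-345 are the buffered submission path (pblk_submit_read() in the same file): the rqd is taken from the PBLK_READ mempool (275), opcode, sector count, private pointer and end_io callback are filled in (277-280), the metadata buffers are attached (286-288), and the addresses are resolved through pblk_read_ppalist_rq() or pblk_read_rq() (299-302). A request served entirely from cache completes immediately (308-312); a partially served one shrinks nr_ppas to the sectors still outstanding, and when only one sector remains its address is carried inline in ppa_addr rather than through ppa_list (332-334), which is the convention nvm_rq_to_ppa_list() (lines 128 and 164) relies on. A submission failure marks the request with -ENODEV and completes it inline (342-345). Below is a sketch of the single-sector collapse with simplified stand-in types; mini_rq, rq_ppa_list() and shrink_to() are hypothetical names.

/* Sketch of the collapse at lines 332-334: a request that shrinks to one
 * sector carries its address inline instead of through the ppa_list array. */
#include <stdio.h>

struct ppa { unsigned long v; };        /* stand-in for struct ppa_addr */

struct mini_rq {                        /* stand-in for struct nvm_rq   */
        int nr_ppas;
        struct ppa ppa_addr;            /* used when nr_ppas == 1       */
        struct ppa *ppa_list;           /* used when nr_ppas > 1        */
};

/* Mirrors the idea behind nvm_rq_to_ppa_list(): pick the address source. */
static struct ppa *rq_ppa_list(struct mini_rq *rqd)
{
        return rqd->nr_ppas > 1 ? rqd->ppa_list : &rqd->ppa_addr;
}

static void shrink_to(struct mini_rq *rqd, int nr_secs)
{
        rqd->nr_ppas = nr_secs;
        if (rqd->nr_ppas == 1)
                rqd->ppa_addr = rqd->ppa_list[0];   /* collapse the list */
}

int main(void)
{
        struct ppa list[4] = { {11}, {12}, {13}, {14} };
        struct mini_rq rqd = { .nr_ppas = 4, .ppa_list = list };

        shrink_to(&rqd, 1);
        printf("ppa=%lu\n", rq_ppa_list(&rqd)[0].v);
        return 0;
}
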
349 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
370 rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
380 static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
404 rqd->ppa_addr = ppa_l2p;
417 struct nvm_rq rqd;
420 memset(&rqd, 0, sizeof(struct nvm_rq));
422 ret = pblk_alloc_rqd_meta(pblk, &rqd);
427 gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
432 rqd.ppa_addr = rqd.ppa_list[0];
434 gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
442 rqd.opcode = NVM_OP_PREAD;
443 rqd.nr_ppas = gc_rq->secs_to_gc;
445 if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
450 pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
454 if (rqd.error) {
457 pblk_print_failed_rqd(pblk, &rqd, rqd.error);
468 pblk_free_rqd_meta(pblk, &rqd);
472 pblk_free_rqd_meta(pblk, &rqd);
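
Lines 349-472 are the garbage-collection read path: read_ppalist_rq_gc() and read_rq_gc() fill rqd->ppa_list or rqd->ppa_addr from the still-valid L2P entries (370, 404), while the submit routine (pblk_submit_read_gc() in the same file) keeps the nvm_rq on the stack (417-420), allocates only its metadata buffers (422), submits synchronously with pblk_submit_io_sync() (445), checks the returned metadata against the GC lba_list (450), and frees the metadata on both the success and error exits (468, 472). The sketch below shows that on-stack-request-plus-shared-cleanup shape; alloc_rq_meta(), free_rq_meta(), submit_io_sync() and gc_read() are hypothetical stand-ins for the pblk helpers, not the kernel API.

/* Sketch only: the helpers are stand-ins for pblk_alloc_rqd_meta(),
 * pblk_free_rqd_meta() and pblk_submit_io_sync(). */
#include <stdlib.h>
#include <string.h>

struct mini_rq {                 /* stand-in for the on-stack struct nvm_rq */
        void *meta_list;
        int nr_ppas;
        int error;
};

static int alloc_rq_meta(struct mini_rq *rqd)
{
        rqd->meta_list = calloc(64, 16);     /* pretend per-sector metadata */
        return rqd->meta_list ? 0 : -1;
}

static void free_rq_meta(struct mini_rq *rqd)
{
        free(rqd->meta_list);
}

static int submit_io_sync(struct mini_rq *rqd)
{
        return 0;                            /* pretend the read succeeded  */
}

static int gc_read(int secs_to_gc)
{
        struct mini_rq rqd;
        int ret = 0;

        memset(&rqd, 0, sizeof(rqd));        /* as at line 420              */
        if (alloc_rq_meta(&rqd))
                return -1;

        if (!secs_to_gc)                     /* nothing valid left to move  */
                goto out;

        rqd.nr_ppas = secs_to_gc;
        if (submit_io_sync(&rqd) || rqd.error)
                ret = -1;                    /* fall through to cleanup     */

out:
        free_rq_meta(&rqd);                  /* freed on success and failure */
        return ret;
}

int main(void)
{
        return gc_read(4) ? 1 : 0;
}
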