Lines matching refs: rqd
79 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
87 line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
88 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
93 if (rqd->error) {
95 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
98 pblk_mark_bb(pblk, line, rqd->ppa_addr);
101 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
106 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
113 static void pblk_end_io_erase(struct nvm_rq *rqd)
115 struct pblk *pblk = rqd->private;
117 __pblk_end_io_erase(pblk, rqd);
118 mempool_free(rqd, &pblk->e_rq_pool);
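__pblk_end_io_erase() (line 79) maps rqd->ppa_addr back to its line and chunk position, traces the chunk reset as failed or done, and marks a bad block on error; pblk_end_io_erase() (line 113) is the nvm_rq end_io callback that recovers the pblk instance from rqd->private and returns the request to its mempool. A minimal sketch of that callback, assuming only the symbols visible in the matches above:

/* Sketch of the erase completion callback (lines 113-118): erase rqds are
 * allocated from e_rq_pool, so completion frees them back to that pool.
 */
static void sketch_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;	/* set before submission (line 1699) */

	__pblk_end_io_erase(pblk, rqd);		/* per-line/chunk bookkeeping */
	mempool_free(rqd, &pblk->e_rq_pool);	/* return to the erase mempool */
}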
241 int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
245 rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
246 &rqd->dma_meta_list);
247 if (!rqd->meta_list)
250 if (rqd->nr_ppas == 1)
253 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
254 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
259 void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
263 if (rqd->meta_list)
264 nvm_dev_dma_free(dev->parent, rqd->meta_list,
265 rqd->dma_meta_list);
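pblk_alloc_rqd_meta() (line 241) backs a request with a DMA-able out-of-band metadata buffer; for multi-sector requests the PPA list is carved out of the same allocation at an offset of pblk_dma_meta_size(pblk) (lines 253-254), so pblk_free_rqd_meta() (line 259) only has to release meta_list. A hedged sketch of the usual bracket around a request, with submission elided:

/* Sketch: alloc/free bracket around a multi-sector request, using only the
 * helpers shown above. Error handling is reduced to the essentials.
 */
static int sketch_rq_with_meta(struct pblk *pblk, struct nvm_rq *rqd,
			       int nr_ppas)
{
	int ret;

	rqd->nr_ppas = nr_ppas;

	ret = pblk_alloc_rqd_meta(pblk, rqd);	/* meta_list, plus ppa_list if nr_ppas > 1 */
	if (ret)
		return ret;

	/* ... fill nvm_rq_to_ppa_list(rqd), submit, wait for completion ... */

	pblk_free_rqd_meta(pblk, rqd);		/* frees the single DMA allocation */
	return 0;
}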
272 struct nvm_rq *rqd;
290 rqd = mempool_alloc(pool, GFP_KERNEL);
291 memset(rqd, 0, rq_size);
293 return rqd;
297 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
303 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
315 pblk_err(pblk, "trying to free unknown rqd type\n");
319 pblk_free_rqd_meta(pblk, rqd);
320 mempool_free(rqd, pool);
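pblk_alloc_rqd() and pblk_free_rqd() (lines 272-320) hand out nvm_rq structures from per-type mempools and zero them before use; PBLK_WRITE requests additionally carry a lun_bitmap in their pblk_c_ctx PDU, which is kfree()d on the way back (line 303), and pblk_free_rqd_meta() runs before the request is returned to its pool (lines 319-320). A sketch of the type-keyed usage, assuming the PBLK_READ request type and the per-type pools from pblk:

/* Sketch: allocate and release an rqd by type; freeing with the same type
 * returns it to the matching mempool (e.g. e_rq_pool at line 118).
 */
static void sketch_rqd_pool_usage(struct pblk *pblk)
{
	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_READ);	/* zeroed on allocation (line 291) */

	rqd->opcode = NVM_OP_PREAD;
	/* ... set nr_ppas, PPA list, bio and end_io before submission ... */

	pblk_free_rqd(pblk, rqd, PBLK_READ);	/* also releases DMA meta (line 319) */
}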
473 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
477 pblk_print_failed_rqd(pblk, rqd, rqd->error);
481 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
484 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
489 switch (rqd->error) {
498 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
501 pblk_print_failed_rqd(pblk, rqd, rqd->error);
510 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
517 if (pblk_check_io(pblk, rqd))
521 return nvm_submit_io(dev, rqd, buf);
524 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
526 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
530 for (i = 0; i < rqd->nr_ppas; i++) {
544 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
552 if (pblk_check_io(pblk, rqd))
556 ret = nvm_submit_io_sync(dev, rqd, buf);
559 rqd->opcode == NVM_OP_PWRITE)
560 pblk_check_chunk_state_update(pblk, rqd);
565 static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
568 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
572 ret = pblk_submit_io_sync(pblk, rqd, buf);
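pblk_submit_io() (line 510) and pblk_submit_io_sync() (line 544) are thin wrappers around nvm_submit_io()/nvm_submit_io_sync() that add the pblk_check_io() sanity check and, for completed writes, refresh the cached chunk state via pblk_check_chunk_state_update(); pblk_submit_io_sync_sem() (line 565) does the same while taking and releasing the write semaphore for the request's first PPA. A sketch of the synchronous wrapper, with the debug-only and tracing guards around these calls approximated:

/* Sketch of the sync submission wrapper (lines 544-560). A zero return only
 * means the command completed; the device status lands in rqd->error.
 */
static int sketch_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int ret;

	if (pblk_check_io(pblk, rqd))		/* address sanity check */
		return NVM_IO_ERR;

	ret = nvm_submit_io_sync(dev, rqd, buf);

	if (!ret && rqd->opcode == NVM_OP_PWRITE)
		pblk_check_chunk_state_update(pblk, rqd);	/* keep cached write pointers in sync */

	return ret;
}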
681 struct nvm_rq rqd;
685 memset(&rqd, 0, sizeof(struct nvm_rq));
687 ret = pblk_alloc_rqd_meta(pblk, &rqd);
691 rqd.opcode = NVM_OP_PREAD;
692 rqd.nr_ppas = lm->smeta_sec;
693 rqd.is_seq = 1;
694 ppa_list = nvm_rq_to_ppa_list(&rqd);
699 ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
707 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
708 pblk_log_read_err(pblk, &rqd);
713 pblk_free_rqd_meta(pblk, &rqd);
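Lines 681-713 are the synchronous smeta (start-of-line metadata) read: a stack-allocated nvm_rq, DMA metadata from pblk_alloc_rqd_meta(), a sequential NVM_OP_PREAD over lm->smeta_sec sectors into line->smeta, and an error check that tolerates NVM_RSP_WARN_HIGHECC. A condensed sketch of the flow; pblk_line_smeta_start() and addr_to_gen_ppa() are assumed helpers that do not appear in the match list:

/* Condensed sketch of the smeta read path (lines 681-713). */
static int sketch_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	u64 paddr = pblk_line_smeta_start(pblk, line);	/* assumed: first smeta sector */
	int i, ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;					/* sequential access constraint */
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < lm->smeta_sec; i++, paddr++)
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);	/* assumed helper */

	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
	if (!ret && rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);
		ret = -EIO;
	}

	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}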
722 struct nvm_rq rqd;
727 memset(&rqd, 0, sizeof(struct nvm_rq));
729 ret = pblk_alloc_rqd_meta(pblk, &rqd);
733 rqd.opcode = NVM_OP_PWRITE;
734 rqd.nr_ppas = lm->smeta_sec;
735 rqd.is_seq = 1;
736 ppa_list = nvm_rq_to_ppa_list(&rqd);
740 rqd.meta_list, i);
746 ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
754 if (rqd.error) {
755 pblk_log_write_err(pblk, &rqd);
760 pblk_free_rqd_meta(pblk, &rqd);
772 struct nvm_rq rqd;
791 memset(&rqd, 0, sizeof(struct nvm_rq));
796 rqd.meta_list = meta_list;
797 rqd.ppa_list = ppa_list_buf;
798 rqd.dma_meta_list = dma_meta_list;
799 rqd.dma_ppa_list = dma_ppa_list;
800 rqd.opcode = NVM_OP_PREAD;
801 rqd.nr_ppas = rq_ppas;
802 ppa_list = nvm_rq_to_ppa_list(&rqd);
804 for (i = 0; i < rqd.nr_ppas; ) {
809 rqd.is_seq = 1;
831 ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
839 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
840 pblk_log_read_err(pblk, &rqd);
851 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
855 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
858 rqd->opcode = NVM_OP_ERASE;
859 rqd->ppa_addr = ppa;
860 rqd->nr_ppas = 1;
861 rqd->is_seq = 1;
862 rqd->bio = NULL;
867 struct nvm_rq rqd = {NULL};
873 pblk_setup_e_rq(pblk, &rqd, ppa);
878 ret = pblk_submit_io_sync(pblk, &rqd, NULL);
879 rqd.private = pblk;
880 __pblk_end_io_erase(pblk, &rqd);
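pblk_setup_e_rq() (line 855) is the shared erase setup: a single-PPA, sequential NVM_OP_ERASE with no bio attached. The synchronous erase (lines 867-880) reuses the asynchronous completion logic by calling __pblk_end_io_erase() directly once the request returns. A condensed sketch using only helpers from the match list:

/* Condensed sketch of the synchronous chunk erase (lines 867-880). */
static int sketch_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	pblk_setup_e_rq(pblk, &rqd, ppa);	/* NVM_OP_ERASE, nr_ppas = 1, is_seq, no bio */

	ret = pblk_submit_io_sync(pblk, &rqd, NULL);

	rqd.private = pblk;			/* completion bookkeeping expects it (line 879) */
	__pblk_end_io_erase(pblk, &rqd);	/* same path as the async callback */

	return ret;
}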
1444 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1446 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1449 for (i = 0; i < rqd->nr_ppas; i++)
1691 struct nvm_rq *rqd;
1694 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1696 pblk_setup_e_rq(pblk, rqd, ppa);
1698 rqd->end_io = pblk_end_io_erase;
1699 rqd->private = pblk;
1707 err = pblk_submit_io(pblk, rqd, NULL);
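The asynchronous erase (lines 1691-1707) allocates its rqd from the PBLK_ERASE mempool, runs the same pblk_setup_e_rq() setup, and wires pblk_end_io_erase() as the end_io callback before submitting through pblk_submit_io(). A condensed sketch, with the failure handling of the real path omitted:

/* Condensed sketch of the asynchronous chunk erase (lines 1691-1707). On
 * completion, pblk_end_io_erase() frees the rqd back to e_rq_pool.
 */
static int sketch_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);		/* mempool-backed, zeroed */

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;		/* async completion (lines 113-118) */
	rqd->private = pblk;				/* recovered in the callback */

	return pblk_submit_io(pblk, rqd, NULL);
}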
2115 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2121 buffer = rqd->meta_list;
2127 rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2133 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2135 void *meta_list = rqd->meta_list;
2142 page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2144 for (; i < rqd->nr_ppas; i++)
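The last two helpers support devices without per-sector out-of-band metadata: pblk_get_meta_for_writes() (line 2115) returns either the DMA meta_list or, in the packed-metadata case, the last page of the request's bio, and pblk_get_packed_meta() (line 2133) copies that page back into meta_list after a read completes. A hedged sketch of the bio-page access both rely on; pblk_is_oob_meta_supported(), which gates the packed path, is an assumption not shown in the match list:

/* Sketch: with packed metadata, the per-sector metadata lives in the last
 * page of rqd->bio (lines 2127 and 2142).
 */
static void *sketch_packed_meta_page(struct nvm_rq *rqd)
{
	struct bio *bio = rqd->bio;

	/* the final bvec of the request holds the packed metadata block */
	return page_to_virt(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
}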