Lines matching refs: rqd (uses of the struct nvm_rq request descriptor in the lightnvm core, drivers/lightnvm/core.c)
606 static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
608 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
610 nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
613 static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
615 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
617 nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
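The converters at 606 and 613 are symmetric wrappers: each resolves the request's PPA list and translates every address between the target's view and the device's view. The resolution step hinges on how struct nvm_rq stores addresses; a minimal sketch of the helper, as nvm_rq_to_ppa_list() is defined in include/linux/lightnvm.h (reconstructed, so treat the exact form as an assumption):

static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
        /* vectored requests carry a DMA-able list; a single-sector
         * request embeds its one address directly in the descriptor */
        return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}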
671 static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
680 rqd->nr_ppas = nr_ppas;
681 rqd->ppa_addr = ppas[0];
686 rqd->nr_ppas = nr_ppas;
687 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
688 if (!rqd->ppa_list) {
694 rqd->nr_ppas *= plane_cnt;
700 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
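Matches 671-700 are the PPA-list setup, with two paths: a single address on single-plane geometry is stored inline (680-681); otherwise a DMA-coherent list is allocated (687) and each caller-supplied address is replicated once per plane, with the plane bits stamped into each copy (694-700). A sketch of the full function around those lines, under the assumption that geo->pln_mode doubles as the plane count, as in the lightnvm geometry structs:

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                               const struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];        /* inline, no allocation */
                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list)
                return -ENOMEM;

        plane_cnt = geo->pln_mode;
        rqd->nr_ppas *= plane_cnt;      /* one entry per (address, plane) pair */

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppa = ppas[i];
                        ppa.g.pl = pl_idx;      /* stamp the plane bits */
                        rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                }
        }

        return 0;
}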
708 struct nvm_rq *rqd)
710 if (!rqd->ppa_list)
713 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
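The teardown at 708-713 is the mirror image; the NULL check is what makes the inline single-PPA path safe to pass through unconditionally:

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
                                 struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)     /* inline ppa_addr case: nothing was allocated */
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}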
716 static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
723 if (rqd->is_seq)
726 if (rqd->opcode == NVM_OP_PREAD)
728 else if (rqd->opcode == NVM_OP_PWRITE)
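nvm_set_flags() at 716 derives the OCSSD 1.2 command flags from the request: plane-mode bits for sequential access, plus scramble/suspend bits keyed off the opcode. A sketch, assuming the NVM_IO_* and NVM_OCSSD_SPEC_20 constants from include/linux/lightnvm.h:

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;       /* 2.0 commands carry no 1.2-style flags */

        if (rqd->is_seq)
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)
                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
        else if (rqd->opcode == NVM_OP_PWRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}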
734 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
742 nvm_rq_tgt_to_dev(tgt_dev, rqd);
744 rqd->dev = tgt_dev;
745 rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
748 ret = dev->ops->submit_io(dev, rqd, buf);
750 nvm_rq_dev_to_tgt(tgt_dev, rqd);
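Reading 734-750 together gives the async submission path: translate the target's addresses to device format, stamp ownership and flags, then hand off to the driver; if the driver rejects the request, translate back so the caller sees the error in its own address space. Roughly:

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        ret = dev->ops->submit_io(dev, rqd, buf);
        if (ret)
                /* fail with addresses back in the caller's format */
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}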
755 static void nvm_sync_end_io(struct nvm_rq *rqd)
757 struct completion *waiting = rqd->private;
762 static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
768 rqd->end_io = nvm_sync_end_io;
769 rqd->private = &wait;
771 ret = dev->ops->submit_io(dev, rqd, buf);
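The synchronous machinery at 755-771 reuses the same driver hook by parking an on-stack completion in rqd->private and pointing end_io at a trivial waker; nvm_end_io() (800, below) fires it when the device completes. Sketch of the pair:

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
                              void *buf)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret;

        rqd->end_io = nvm_sync_end_io;
        rqd->private = &wait;

        ret = dev->ops->submit_io(dev, rqd, buf);
        if (ret)
                return ret;

        wait_for_completion_io(&wait);
        return 0;
}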
780 int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
789 nvm_rq_tgt_to_dev(tgt_dev, rqd);
791 rqd->dev = tgt_dev;
792 rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
794 ret = nvm_submit_io_wait(dev, rqd, buf);
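From a target's side, the sync entry point at 780 is the convenient one-call form. A hypothetical caller (my_tgt_read_sector() is illustrative only, not from this source):

/* hypothetical helper, for illustration: read one sector synchronously */
static int my_tgt_read_sector(struct nvm_tgt_dev *tgt_dev,
                              struct ppa_addr ppa, void *buf)
{
        struct nvm_rq rqd = { NULL };

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = ppa;     /* target-relative; the core translates it */

        return nvm_submit_io_sync(tgt_dev, &rqd, buf);
}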
800 void nvm_end_io(struct nvm_rq *rqd)
802 struct nvm_tgt_dev *tgt_dev = rqd->dev;
806 nvm_rq_dev_to_tgt(tgt_dev, rqd);
808 if (rqd->end_io)
809 rqd->end_io(rqd);
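Completion at 800-809 mirrors submission: device addresses are mapped back to the target's space before the target's own end_io callback runs. The tgt_dev check is what lets raw submissions (813, which null rqd->dev) skip translation:

void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        if (tgt_dev)    /* raw submissions set dev to NULL: no translation */
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}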
813 static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
818 rqd->dev = NULL;
819 rqd->flags = nvm_set_flags(&dev->geo, rqd);
821 return nvm_submit_io_wait(dev, rqd, NULL);
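The raw variant at 813 bypasses address translation entirely: callers pass device-format addresses, and clearing rqd->dev (818) is the marker nvm_end_io() tests above. Sketch:

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io)
                return -ENODEV;

        rqd->dev = NULL;        /* device-format addresses: skip translation */
        rqd->flags = nvm_set_flags(&dev->geo, rqd);

        return nvm_submit_io_wait(dev, rqd, NULL);
}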
826 struct nvm_rq rqd = { NULL };
840 rqd.bio = &bio;
841 rqd.opcode = NVM_OP_PREAD;
842 rqd.is_seq = 1;
843 rqd.nr_ppas = 1;
844 rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
846 ret = nvm_submit_io_sync_raw(dev, &rqd);
851 return rqd.error;
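The matches at 826-851 belong to a chunk-sense read: a one-page, single-PPA sequential read issued raw to probe a block's state, with rqd.error (the device status) rather than the submit return code reported back. A sketch of the surrounding function, assuming the on-stack bio setup the kernel uses for one-off reads of this kind:

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd = { NULL };
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
        int ret;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        bio_init(&bio, &bio_vec, 1);
        bio_add_page(&bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);

        rqd.bio = &bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.is_seq = 1;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);   /* raw: device format */

        ret = nvm_submit_io_sync_raw(dev, &rqd);
        __free_page(page);
        if (ret)
                return ret;

        return rqd.error;       /* device status, distinct from the submit rc */
}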
1068 struct nvm_rq rqd;
1079 memset(&rqd, 0, sizeof(struct nvm_rq));
1081 nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
1082 nvm_rq_tgt_to_dev(tgt_dev, &rqd);
1084 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
1085 nvm_free_rqd_ppalist(tgt_dev, &rqd);
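Finally, 1068-1085 reuse the same rqd plumbing without submitting I/O at all: a PPA list is built and translated, then handed straight to the driver's set_bb_tbl hook for a bad-block table update. Passing &rqd.ppa_addr plausibly covers both the inline and the list case because, in struct nvm_rq, ppa_addr shares a union with dma_ppa_list (an assumption worth verifying against your lightnvm.h). Roughly:

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        /* no submit_io: the driver consumes the address list directly */
        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret)
                return -EINVAL;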