/kernel/linux/linux-5.10/drivers/lightnvm/
pblk-read.c
    40  static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, ...)  (argument)
    44          void *meta_list = rqd->meta_list;
    48          nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas, ...
    58          if (pblk_ppa_empty(rqd->ppa_list[i])) {
    62          } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
    69          ... rqd->ppa_list[i])) {
    99          rqd->is_seq = 1;
   109  static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd, ...)  (argument)
   112          void *meta_list = rqd->meta_list;
   142  pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd, u64 *lba_list, int nr_lbas)  (argument)
   187  __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd, bool put_line)  (argument)
   214  pblk_end_io_read(struct nvm_rq *rqd)  (argument)
   224  pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio, sector_t lba, bool *from_cache)  (argument)
   269  struct nvm_rq *rqd;  (local in pblk_submit_read())
   349  read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd, struct pblk_line *line, u64 *lba_list, u64 *paddr_list_gc, unsigned int nr_secs)  (argument)
   380  read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd, struct pblk_line *line, sector_t lba, u64 paddr_gc)  (argument)
   417  struct nvm_rq rqd;  (local in pblk_submit_read_gc())
        [more matches elided]
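The pblk-read.c hits at lines 48-62 outline the read path's per-sector decision after the L2P lookup: an empty PPA is a hole that was never written, a cache address is served out of the write buffer, and everything else becomes a media read. Below is a minimal standalone sketch of that classification; pblk_ppa_empty() and pblk_addr_in_cache() are the real helper names, while the types, encodings, and enum here are illustrative stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct ppa_addr: pblk encodes the "empty" and
 * "cached in write buffer" states directly in the address bits. */
struct ppa { uint64_t bits; };

#define PPA_EMPTY   (~0ULL)          /* illustrative encoding */
#define PPA_CACHED  (1ULL << 63)     /* illustrative encoding */

static bool ppa_empty(struct ppa p)    { return p.bits == PPA_EMPTY; }
static bool ppa_in_cache(struct ppa p) { return (p.bits & PPA_CACHED) != 0; }

enum sec_source { SEC_HOLE, SEC_CACHE, SEC_DEVICE };

/* Classify one sector the way pblk_read_ppalist_rq() walks
 * rqd->ppa_list[]: holes get zero-filled, cached sectors come from
 * the ring buffer, the rest go to the device as one media read. */
static enum sec_source classify(struct ppa p)
{
	if (ppa_empty(p))
		return SEC_HOLE;
	if (ppa_in_cache(p))
		return SEC_CACHE;
	return SEC_DEVICE;
}

int main(void)
{
	struct ppa hole = { PPA_EMPTY }, cached = { PPA_CACHED }, dev = { 42 };

	printf("%d %d %d\n", classify(hole), classify(cached), classify(dev));
	return 0;
}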
pblk-write.c
    22  static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd, ...)  (argument)
    53          pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid, ...
    57          atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
    62          bio_put(rqd->bio);
    63          pblk_free_rqd(pblk, rqd, PBLK_WRITE);
    68  pblk_end_queued_w_bio(struct pblk *pblk, struct nvm_rq *rqd, struct pblk_c_ctx *c_ctx)  (argument)
    73          return pblk_end_w_bio(pblk, rqd, c_ctx);
    76  static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd, ...)  (argument)
    90          pos = pblk_end_w_bio(pblk, rqd, c_ctx);
    94          rqd ...
   213  struct nvm_rq *rqd = recovery->rqd;  (local in pblk_submit_rec())
   235  pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   252  pblk_end_io_write(struct nvm_rq *rqd)  (argument)
   272  pblk_end_io_write_meta(struct nvm_rq *rqd)  (argument)
   302  pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int nr_secs, nvm_end_io_fn (*end_io))  (argument)
   315  pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd, struct ppa_addr *erase_ppa)  (argument)
   376  struct nvm_rq *rqd;  (local in pblk_submit_meta_io())
   494  pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   545  pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   558  struct nvm_rq *rqd;  (local in pblk_submit_write())
        [more matches elided]
core.c
   606  static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)  (argument)
   608          struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
   610          nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
   613  static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)  (argument)
   615          struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
   617          nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
   671  static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, ...)  (argument)
   680          rqd->nr_ppas = nr_ppas;
   681          rqd->ppa_addr = ppas[0];
   686          rqd ...
   707  nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)  (argument)
   716  nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)  (argument)
   734  nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)  (argument)
   755  nvm_sync_end_io(struct nvm_rq *rqd)  (argument)
   762  nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd, void *buf)  (argument)
   780  nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)  (argument)
   800  nvm_end_io(struct nvm_rq *rqd)  (argument)
   813  nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)  (argument)
   826  struct nvm_rq rqd = { NULL };  (local in nvm_bb_chunk_sense())
  1068  struct nvm_rq rqd;  (local in nvm_set_chunk_meta())
        [more matches elided]
pblk-core.c
    79  static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
    87          line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
    88          pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
    93          if (rqd->error) {
    95                  &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
    98          pblk_mark_bb(pblk, line, rqd->ppa_addr);
   101                  &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
   106          trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr, ...
   113  static void pblk_end_io_erase(struct nvm_rq *rqd)  (argument)
   115          struct pblk *pblk = rqd->private;
   241  pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   259  pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   272  struct nvm_rq *rqd;  (local in pblk_alloc_rqd())
   297  pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)  (argument)
   473  pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   481  pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   510  pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)  (argument)
   524  pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
   544  pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)  (argument)
   565  pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd, void *buf)  (argument)
   681  struct nvm_rq rqd;  (local in pblk_line_smeta_read())
   722  struct nvm_rq rqd;  (local in pblk_line_smeta_write())
   772  struct nvm_rq rqd;  (local in pblk_line_emeta_read())
   855  pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd, struct ppa_addr ppa)  (argument)
   867  struct nvm_rq rqd = {NULL};  (local in pblk_blk_erase_sync())
  1444  pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
  1691  struct nvm_rq *rqd;  (local in pblk_blk_erase_async())
  2115  pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
  2133  pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
        [more matches elided]
pblk-recovery.c
   145  struct nvm_rq *rqd;  (member)
   158  static void pblk_end_io_recov(struct nvm_rq *rqd)  (argument)
   160          struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
   161          struct pblk_pad_rq *pad_rq = rqd->private;
   166          pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
   180  struct nvm_rq *rqd;  (local in pblk_recov_pad_line())
   214          rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
   216          ret = pblk_alloc_rqd_meta(pblk, rqd);
   218          pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
   222          rqd ...
   362  struct nvm_rq *rqd;  (local in pblk_recov_scan_oob())
   481  struct nvm_rq *rqd;  (local in pblk_recov_l2p_from_oob())
        [more matches elided]
pblk-map.c
    96  int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry, ...)  (argument)
   100          void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
   102          struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
   108          for (i = off; i < rqd->nr_ppas; i += min) {
   122  int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd, ...)  (argument)
   129          void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
   131          struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
   139          for (i = 0; i < rqd->nr_ppas; i += min) {
   155          return pblk_map_rq(pblk, rqd, sentry, lun_bitmap, ...
   170          return pblk_map_rq(pblk, rqd, sentry, ...
        [more matches elided]
pblk.h
   134  struct nvm_rq *rqd;  (member)
   738  unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, ...
   765  void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
   766  int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
   767  void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
   769  int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd, ...
   776  void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
   777  void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
   778  int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
   779  int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
  1212  pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd, int error)  (argument)
  1265  pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)  (argument)
        [more matches elided]
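Taken together, these prototypes give the request lifecycle that the pblk-recovery.c hits (lines 214-218 above) show in motion: allocate the rqd from a per-type mempool, attach its DMA metadata list, submit, and free on the error path or from the end_io callback. A hedged sketch of that sequence, using only the calls listed in these hits; the function itself and the elided setup are illustrative, not kernel code.

/* Sketch of the rqd lifecycle as the hits show it, not a complete
 * function. pblk_alloc_rqd()/pblk_free_rqd() draw from per-type
 * mempools; pblk_alloc_rqd_meta() attaches the DMA metadata list. */
static int example_write_int(struct pblk *pblk, void *buf)
{
	struct nvm_rq *rqd;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	ret = pblk_alloc_rqd_meta(pblk, rqd);
	if (ret) {
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		return ret;
	}

	/* ... fill rqd->ppa_list / rqd->nr_ppas, set rqd->end_io ... */

	ret = pblk_submit_io(pblk, rqd, buf);
	if (ret) {
		pblk_free_rqd_meta(pblk, rqd);
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	}
	return ret;  /* on async success, the end_io callback frees the rqd */
}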
pblk-rb.c
   550  unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, ...)  (argument)
   556          struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
   557          struct bio *bio = rqd->bio;

/kernel/linux/linux-5.10/block/
blk-rq-qos.c
   116  bool rq_depth_calc_max_depth(struct rq_depth *rqd)  (argument)
   128          if (rqd->queue_depth == 1) {
   129                  if (rqd->scale_step > 0)
   130                          rqd->max_depth = 1;
   132                          rqd->max_depth = 2;
   143          depth = min_t(unsigned int, rqd->default_depth,
   144                        rqd->queue_depth);
   145          if (rqd->scale_step > 0)
   146                  depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
   147          else if (rqd->scale_step < 0) {
   164  rq_depth_scale_up(struct rq_depth *rqd)  (argument)
   183  rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)  (argument)
        [more matches elided]
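rq_depth_calc_max_depth() derives the usable depth from the configured queue depth and scale_step: each positive step roughly halves the depth (line 146) while keeping it at least 1, and a device with queue_depth == 1 can only toggle between 1 and 2 (lines 128-132). A standalone model of the visible halving arithmetic; the negative-step (widening) branch is truncated in the hits, so it is only noted in a comment.

#include <stdio.h>

/* Model of the scale-down arithmetic shown at blk-rq-qos.c:146:
 * depth = 1 + ((depth - 1) >> min(31, scale_step)), which keeps the
 * result >= 1 no matter how large scale_step gets. */
static unsigned int calc_depth(unsigned int default_depth,
			       unsigned int queue_depth, int scale_step)
{
	unsigned int depth = default_depth < queue_depth ?
			     default_depth : queue_depth;

	if (scale_step > 0)
		depth = 1 + ((depth - 1) >> (scale_step < 31 ? scale_step : 31));
	/* scale_step < 0 widens the depth instead (that branch is
	 * truncated in the hits above), capped relative to queue_depth. */
	return depth;
}

int main(void)
{
	for (int step = 0; step <= 6; step++)
		printf("step %d -> depth %u\n", step, calc_depth(64, 64, step));
	return 0;
}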
blk-wbt.c
   238  struct rq_depth *rqd = &rwb->rq_depth;  (local in latency_exceeded())
   282          if (rqd->scale_step)
   291  struct rq_depth *rqd = &rwb->rq_depth;  (local in rwb_trace_step())
   293          trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
   294                         rwb->wb_background, rwb->wb_normal, rqd->max_depth);
   331  struct rq_depth *rqd = &rwb->rq_depth;  (local in rwb_arm_timer())
   333          if (rqd->scale_step > 0) {
   341                  int_sqrt((rqd->scale_step + 1) << 8));
   356  struct rq_depth *rqd = &rwb->rq_depth;  (local in wb_timer_fn())
   362          trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd ...
   411  struct rq_depth *rqd = &rwb->rq_depth;  (local in wbt_update_limits())
        [more matches elided]
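blk-wbt.c uses the same scale_step to shorten its latency-sampling window while the writeback throttle is scaled down: the divisor visible at line 341 is int_sqrt((scale_step + 1) << 8). Assuming the numerator carries a matching << 4 (that context is truncated in the hits), the effective window is win_nsec / sqrt(scale_step + 1). A standalone model under that assumption; the kernel's int_sqrt() is integer-valued, approximated here with sqrt() for brevity.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the window scaling hinted at blk-wbt.c:341.  sqrt((s+1)<<8)
 * is 16*sqrt(s+1), so a << 4 numerator cancels the factor of 16. */
static uint64_t scaled_window(uint64_t win_nsec, int scale_step)
{
	if (scale_step <= 0)
		return win_nsec;
	return (win_nsec << 4) /
	       (uint64_t)sqrt((double)((scale_step + 1) << 8));
}

int main(void)
{
	for (int step = 0; step <= 4; step++)
		printf("step %d -> %llu ns\n", step,
		       (unsigned long long)scaled_window(100000000ULL, step));
	return 0;
}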
blk-rq-qos.h
   154  bool rq_depth_scale_up(struct rq_depth *rqd);
   155  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
   156  bool rq_depth_calc_max_depth(struct rq_depth *rqd);

/kernel/linux/linux-6.6/block/
blk-rq-qos.c
   110  bool rq_depth_calc_max_depth(struct rq_depth *rqd)  (argument)
   122          if (rqd->queue_depth == 1) {
   123                  if (rqd->scale_step > 0)
   124                          rqd->max_depth = 1;
   126                          rqd->max_depth = 2;
   137          depth = min_t(unsigned int, rqd->default_depth,
   138                        rqd->queue_depth);
   139          if (rqd->scale_step > 0)
   140                  depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
   141          else if (rqd->scale_step < 0) {
   158  rq_depth_scale_up(struct rq_depth *rqd)  (argument)
   177  rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)  (argument)
        [more matches elided]
blk-wbt.c
   307  struct rq_depth *rqd = &rwb->rq_depth;  (local in latency_exceeded())
   351          if (rqd->scale_step)
   360  struct rq_depth *rqd = &rwb->rq_depth;  (local in rwb_trace_step())
   362          trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
   363                         rwb->wb_background, rwb->wb_normal, rqd->max_depth);
   400  struct rq_depth *rqd = &rwb->rq_depth;  (local in rwb_arm_timer())
   402          if (rqd->scale_step > 0) {
   410                  int_sqrt((rqd->scale_step + 1) << 8));
   425  struct rq_depth *rqd = &rwb->rq_depth;  (local in wb_timer_fn())
   434          trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd ...
   482  struct rq_depth *rqd = &rwb->rq_depth;  (local in wbt_update_limits())
        [more matches elided]
blk-rq-qos.h
    99  bool rq_depth_scale_up(struct rq_depth *rqd);
   100  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
   101  bool rq_depth_calc_max_depth(struct rq_depth *rqd);

/kernel/linux/linux-5.10/drivers/nvme/host/
lightnvm.c
   624  static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns, ...)  (argument)
   627          c->ph_rw.opcode = rqd->opcode;
   629          c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
   630          c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
   631          c->ph_rw.control = cpu_to_le16(rqd->flags);
   632          c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
   637  struct nvm_rq *rqd = rq->end_io_data;  (local in nvme_nvm_end_io())
   639          rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
   640          rqd->error = nvme_req(rq)->status;
   641          nvm_end_io(rqd);
   647  nvme_nvm_alloc_request(struct request_queue *q, struct nvm_rq *rqd, struct nvme_nvm_command *cmd)  (argument)
   670  nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd, void *buf)  (argument)
        [more matches elided]

/kernel/linux/linux-5.10/include/linux/
lightnvm.h
   321  static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)  (argument)
   323          return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
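This helper explains a pattern that recurs in the core.c and pblk-map.c hits above: a one-sector request keeps its single PPA inline in rqd->ppa_addr, while multi-sector requests carry a separately allocated rqd->ppa_list, and callers go through nvm_rq_to_ppa_list() so both cases look the same. A sketch of the canonical use; only nvm_rq_to_ppa_list() and the rqd fields are from the source, and inspect_ppa() is a hypothetical stand-in for whatever per-PPA work a caller does.

/* Walk every PPA of a request without caring whether it is the
 * inline single address or the allocated multi-entry list. */
static void for_each_ppa_example(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++)
		inspect_ppa(ppa_list[i]);	/* hypothetical helper */
}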

/kernel/linux/linux-5.10/drivers/dma/
pl330.c
   547  struct list_head rqd;  (member)
  1716          list_add_tail(&descdone->rqd, &pl330->req_done);  (in pl330_update())
  1723                  struct dma_pl330_desc, rqd);
  1724          list_del(&descdone->rqd);
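Here rqd is not a request descriptor at all but an intrusive list node: pl330_update() queues each finished descriptor on pl330->req_done through the embedded list_head, and completion code later recovers the containing dma_pl330_desc with list_first_entry() and unlinks it with list_del(). A self-contained userspace model of that pattern; the minimal list_head and container_of stand in for <linux/list.h>, and struct desc is illustrative.

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly linked list, same shape as the kernel's. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* Like dma_pl330_desc: payload plus an embedded node named rqd. */
struct desc { int id; struct list_head rqd; };

int main(void)
{
	struct list_head req_done;
	struct desc d1 = { .id = 1 }, d2 = { .id = 2 };

	list_init(&req_done);
	list_add_tail(&d1.rqd, &req_done);   /* as in pl330_update() */
	list_add_tail(&d2.rqd, &req_done);

	while (req_done.next != &req_done) {
		struct desc *done =
			container_of(req_done.next, struct desc, rqd);
		list_del(&done->rqd);        /* pop, then complete it */
		printf("completed desc %d\n", done->id);
	}
	return 0;
}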

/kernel/linux/linux-6.6/drivers/dma/
pl330.c
   547  struct list_head rqd;  (member)
  1714          list_add_tail(&descdone->rqd, &pl330->req_done);  (in pl330_update())
  1721                  struct dma_pl330_desc, rqd);
  1722          list_del(&descdone->rqd);

/kernel/linux/linux-5.10/arch/m68k/ifpsp060/src/
ilsp.S
   299  swap %d5 # same as r*b if previous step rqd

/kernel/linux/linux-6.6/arch/m68k/ifpsp060/src/
ilsp.S
   299  swap %d5 # same as r*b if previous step rqd