/kernel/linux/linux-6.6/drivers/scsi/elx/efct/
efct_hw_queues.c
     21  struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];  (in efct_hw_init_queues(), local)
     79  if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {  (in efct_hw_init_queues())
     85  rqs[j]->filter_mask = 0;  (in efct_hw_init_queues())
     86  rqs[j]->is_mrq = true;  (in efct_hw_init_queues())
     87  rqs[j]->base_mrq_id = rqs[0]->hdr->id;  (in efct_hw_init_queues())
    310  efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],  (in efct_hw_new_rq_set(), argument)
    320  rqs[i] = NULL;  (in efct_hw_new_rq_set())
    331  rqs[i] = rq;  (in efct_hw_new_rq_set())
    356  rqs[  (in efct_hw_new_rq_set())
    [all...]
efct_hw.h
    746  efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
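The efct matches above show a receive-queue set being created and then flagged as a multi-RQ (MRQ) group: each member gets filter_mask = 0, is_mrq = true, and the header id of the first queue as its base_mrq_id. Below is a minimal userspace sketch of that marking loop; the struct layout is a hypothetical stand-in for the driver's hw_rq, not the real definition.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's queue types. */
    struct queue_hdr { int id; };
    struct hw_rq {
        struct queue_hdr *hdr;
        unsigned int filter_mask;
        bool is_mrq;
        int base_mrq_id;
    };

    /* Mark a set of RQs as one multi-RQ (MRQ) group: every member
     * shares the header id of the first queue as base_mrq_id. */
    static void mark_mrq_set(struct hw_rq *rqs[], size_t count)
    {
        for (size_t j = 0; j < count; j++) {
            rqs[j]->filter_mask = 0;
            rqs[j]->is_mrq = true;
            rqs[j]->base_mrq_id = rqs[0]->hdr->id;
        }
    }

    int main(void)
    {
        struct queue_hdr h0 = { .id = 7 }, h1 = { .id = 8 };
        struct hw_rq a = { .hdr = &h0 }, b = { .hdr = &h1 };
        struct hw_rq *set[] = { &a, &b };

        mark_mrq_set(set, 2);
        printf("base_mrq_id of rq1: %d\n", set[1]->base_mrq_id); /* 7 */
        return 0;
    }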
/kernel/linux/linux-5.10/tools/perf/scripts/python/
sched-migration.py
    170  self.rqs = prev.rqs.copy()
    172  self.rqs = defaultdict(RunqueueSnapshot)
    180  old_rq = self.prev.rqs[cpu]
    186  self.rqs[cpu] = new_rq
    194  old_rq = self.prev.rqs[old_cpu]
    196  self.rqs[old_cpu] = out_rq
    199  new_rq = self.prev.rqs[new_cpu]
    201  self.rqs[new_cpu] = in_rq
    211  old_rq = self.prev.rqs[cp
    [all...]
/kernel/linux/linux-6.6/tools/perf/scripts/python/
sched-migration.py
    170  self.rqs = prev.rqs.copy()
    172  self.rqs = defaultdict(RunqueueSnapshot)
    180  old_rq = self.prev.rqs[cpu]
    186  self.rqs[cpu] = new_rq
    194  old_rq = self.prev.rqs[old_cpu]
    196  self.rqs[old_cpu] = out_rq
    199  new_rq = self.prev.rqs[new_cpu]
    201  self.rqs[new_cpu] = in_rq
    211  old_rq = self.prev.rqs[cp
    [all...]
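In both trees, sched-migration.py evolves a timeline of per-CPU runqueue state by copy-on-write: each event copies the previous event's rqs mapping (self.rqs = prev.rqs.copy()) and replaces only the entries for the CPUs the event touched, so untouched CPUs keep sharing the older snapshot objects. A rough C analog of that derive-and-replace step, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NR_CPUS 4

    /* One immutable per-CPU runqueue snapshot. */
    struct rq_snapshot { int nr_running; };

    /* An event state holds one snapshot pointer per CPU; unchanged
     * CPUs keep pointing at the previous event's snapshots. */
    struct event_state { struct rq_snapshot *rqs[NR_CPUS]; };

    /* Copy the previous state and swap in a fresh snapshot for one
     * CPU, mirroring "self.rqs = prev.rqs.copy()" in the script. */
    static struct event_state *derive_state(const struct event_state *prev,
                                            int cpu, int nr_running)
    {
        struct event_state *next = malloc(sizeof(*next));
        struct rq_snapshot *snap = malloc(sizeof(*snap));

        memcpy(next->rqs, prev->rqs, sizeof(next->rqs)); /* shallow copy */
        snap->nr_running = nr_running;
        next->rqs[cpu] = snap;
        return next;
    }

    int main(void)
    {
        struct rq_snapshot idle = { .nr_running = 0 };
        struct event_state base = { .rqs = { &idle, &idle, &idle, &idle } };
        struct event_state *st = derive_state(&base, 2, 3);

        printf("cpu2: %d, cpu0 shared: %s\n", st->rqs[2]->nr_running,
               st->rqs[0] == base.rqs[0] ? "yes" : "no");
        free(st->rqs[2]);
        free(st);
        return 0;
    }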
/kernel/linux/linux-5.10/block/
kyber-iosched.c
    144  * Also protect the rqs on rq_list when merge.
    182  struct list_head rqs[KYBER_NUM_DOMAINS];  (member)
    489  INIT_LIST_HEAD(&khd->rqs[i]);  (in kyber_init_hctx())
    751  struct list_head *rqs;  (in kyber_dispatch_cur_domain(), local)
    755  rqs = &khd->rqs[khd->cur_domain];  (in kyber_dispatch_cur_domain())
    765  rq = list_first_entry_or_null(rqs, struct request, queuelist);  (in kyber_dispatch_cur_domain())
    780  kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);  (in kyber_dispatch_cur_domain())
    781  rq = list_first_entry(rqs, struct request, queuelist);  (in kyber_dispatch_cur_domain())
    848  if (!list_empty_careful(&khd->rqs[  (in kyber_has_work())
    [all...]
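Kyber keeps one staged request list per scheduling domain (struct list_head rqs[KYBER_NUM_DOMAINS]), and kyber_dispatch_cur_domain() only ever pops from the current domain's list, refilling it via kyber_flush_busy_kcqs() when it runs dry. A compressed userspace sketch of that per-domain indexing, using a plain singly-linked list in place of the kernel's list_head:

    #include <stddef.h>
    #include <stdio.h>

    #define NUM_DOMAINS 3  /* stand-in for KYBER_NUM_DOMAINS */

    /* Minimal singly-linked stand-in for list_head + request. */
    struct request { int tag; struct request *next; };

    struct khd {
        struct request *rqs[NUM_DOMAINS]; /* one FIFO head per domain */
        int cur_domain;
    };

    /* Pop the first request of the current domain, or NULL if the
     * domain list is empty (the kernel would then flush the per-CPU
     * staging kcqs into it and retry). */
    static struct request *dispatch_cur_domain(struct khd *khd)
    {
        struct request **head = &khd->rqs[khd->cur_domain];
        struct request *rq = *head;

        if (rq)
            *head = rq->next;
        return rq;
    }

    int main(void)
    {
        struct request r2 = { .tag = 2, .next = NULL };
        struct request r1 = { .tag = 1, .next = &r2 };
        struct khd khd = { .rqs = { NULL, &r1, NULL }, .cur_domain = 1 };

        struct request *rq;
        while ((rq = dispatch_cur_domain(&khd)))
            printf("dispatched tag %d\n", rq->tag);
        return 0;
    }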
blk-iolatency.c
    134  struct blk_rq_stat rqs;  (member)
    204  blk_rq_stat_init(&stat->rqs);  (in latency_stat_init())
    215  blk_rq_stat_sum(&sum->rqs, &stat->rqs);  (in latency_stat_sum())
    227  blk_rq_stat_add(&stat->rqs, req_time);  (in latency_stat_record_time())
    239  return stat->rqs.mean <= iolat->min_lat_nsec;  (in latency_sum_ok())
    247  return stat->rqs.nr_samples;  (in latency_stat_samples())
    270  stat->rqs.mean);  (in iolat_update_total_lat_avg())
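blk-iolatency accumulates per-request latencies into the rqs member (a struct blk_rq_stat) and latency_sum_ok() passes a window when the mean stays at or below the configured min_lat_nsec. A self-contained sketch of that accounting, assuming a simple sum/count stat rather than the kernel's batched percpu version:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for struct blk_rq_stat: running sum + count. */
    struct rq_stat { uint64_t total_ns; uint64_t nr_samples; };

    static void stat_add(struct rq_stat *s, uint64_t req_time_ns)
    {
        s->total_ns += req_time_ns;
        s->nr_samples++;
    }

    /* Mirror of latency_sum_ok(): the window passes if the mean
     * request latency stayed at or below the floor. */
    static bool latency_sum_ok(const struct rq_stat *s, uint64_t min_lat_nsec)
    {
        if (!s->nr_samples)
            return true;
        return s->total_ns / s->nr_samples <= min_lat_nsec;
    }

    int main(void)
    {
        struct rq_stat s = { 0, 0 };

        stat_add(&s, 80000);
        stat_add(&s, 120000);
        /* mean is 100us, equal to the 100us floor -> ok */
        printf("ok: %d\n", latency_sum_ok(&s, 100000));
        return 0;
    }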
blk-mq-tag.h
     20  struct request **rqs;  (member)
     25  * used to clear request reference in rqs[] before freeing one
blk-mq.c
     880  prefetch(tags->rqs[tag]);  (in blk_mq_tag_to_rq())
     881  return tags->rqs[tag];  (in blk_mq_tag_to_rq())
    1136  hctx->tags->rqs[rq->tag] = rq;  (in blk_mq_get_driver_tag())
    2329  struct request *rq = drv_tags->rqs[i];  (in blk_mq_clear_rq_mapping())
    2334  cmpxchg(&drv_tags->rqs[i], rq, NULL);  (in blk_mq_clear_rq_mapping())
    2354  if (tags->rqs && set->ops->exit_request) {  (in blk_mq_free_rqs())
    2383  kfree(tags->rqs);  (in blk_mq_free_rq_map())
    2384  tags->rqs = NULL;  (in blk_mq_free_rq_map())
    2408  tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),  (in blk_mq_alloc_rq_map())
    2411  if (!tags->rqs) {  (in blk_mq_alloc_rq_map())
    [all...]
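tags->rqs is the tag-indexed lookup table from driver tag to request; blk_mq_clear_rq_mapping() scrubs stale entries with cmpxchg(&drv_tags->rqs[i], rq, NULL) so a concurrent iterator never follows a pointer into freed memory. A userspace C11 sketch of that conditional clear, which only succeeds if the slot still holds the request being freed:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NR_TAGS 4

    struct request { int tag; };

    /* tags->rqs: one request pointer slot per driver tag. */
    static _Atomic(struct request *) rqs[NR_TAGS];

    /* Clear a mapping only if the slot still points at rq, mirroring
     * cmpxchg(&drv_tags->rqs[i], rq, NULL): a racing path may already
     * have replaced or cleared the slot, and must not be undone. */
    static void clear_rq_mapping(struct request *rq, int tag)
    {
        struct request *expected = rq;

        atomic_compare_exchange_strong(&rqs[tag], &expected, NULL);
    }

    int main(void)
    {
        struct request r = { .tag = 1 };

        atomic_store(&rqs[1], &r);
        clear_rq_mapping(&r, 1);
        printf("slot 1 cleared: %s\n", atomic_load(&rqs[1]) ? "no" : "yes");
        return 0;
    }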
blk-mq-tag.c
    209  rq = tags->rqs[bitnr];  (in blk_mq_find_and_get_req())
    229  * test and set the bit before assigning ->rqs[].  (in bt_iter())
    293  * test and set the bit before assigning ->rqs[].  (in bt_tags_iter())
    332  if (tags->rqs)  (in bt_tags_for_each())
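The bt_iter()/bt_tags_iter() comments capture an ordering contract: the allocator sets the tag bit before writing ->rqs[], so an iterator that observes the bit may still find the slot unpublished (or stale) and must revalidate the pointer it reads. A small C11 sketch of the reader side of that contract, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct request { int tag; };

    static atomic_bool tag_bit;                /* "tag allocated" bit */
    static _Atomic(struct request *) rq_slot;  /* the ->rqs[] entry */

    /* Iterator side: the bit alone is not enough, the slot may not
     * be written yet, so fetch the pointer and double-check it. */
    static struct request *find_and_get_req(void)
    {
        if (!atomic_load(&tag_bit))
            return NULL;
        struct request *rq = atomic_load(&rq_slot);
        if (!rq)
            return NULL;  /* bit set, slot not published yet */
        return rq;
    }

    int main(void)
    {
        atomic_store(&tag_bit, true);      /* bit set first ... */
        printf("before publish: %p\n", (void *)find_and_get_req());

        static struct request r = { .tag = 3 };
        atomic_store(&rq_slot, &r);        /* ... slot published later */
        printf("after publish: tag %d\n", find_and_get_req()->tag);
        return 0;
    }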
/kernel/linux/linux-6.6/block/
kyber-iosched.c
    144  * Also protect the rqs on rq_list when merge.
    183  struct list_head rqs[KYBER_NUM_DOMAINS];  (member)
    495  INIT_LIST_HEAD(&khd->rqs[i]);  (in kyber_init_hctx())
    758  struct list_head *rqs;  (in kyber_dispatch_cur_domain(), local)
    762  rqs = &khd->rqs[khd->cur_domain];  (in kyber_dispatch_cur_domain())
    772  rq = list_first_entry_or_null(rqs, struct request, queuelist);  (in kyber_dispatch_cur_domain())
    787  kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);  (in kyber_dispatch_cur_domain())
    788  rq = list_first_entry(rqs, struct request, queuelist);  (in kyber_dispatch_cur_domain())
    855  if (!list_empty_careful(&khd->rqs[  (in kyber_has_work())
    [all...]
blk-iolatency.c
    135  struct blk_rq_stat rqs;  (member)
    205  blk_rq_stat_init(&stat->rqs);  (in latency_stat_init())
    216  blk_rq_stat_sum(&sum->rqs, &stat->rqs);  (in latency_stat_sum())
    228  blk_rq_stat_add(&stat->rqs, req_time);  (in latency_stat_record_time())
    240  return stat->rqs.mean <= iolat->min_lat_nsec;  (in latency_sum_ok())
    248  return stat->rqs.nr_samples;  (in latency_stat_samples())
    271  stat->rqs.mean);  (in iolat_update_total_lat_avg())
blk-mq-tag.c
    259  rq = tags->rqs[bitnr];  (in blk_mq_find_and_get_req())
    285  * test and set the bit before assigning ->rqs[].  (in bt_iter())
    351  * test and set the bit before assigning ->rqs[].  (in bt_tags_iter())
    390  if (tags->rqs)  (in bt_tags_for_each())
blk-mq.c
    1802  hctx->tags->rqs[rq->tag] = rq;  (in __blk_mq_get_driver_tag())
    3263  struct request *rq = drv_tags->rqs[i];  (in blk_mq_clear_rq_mapping())
    3268  cmpxchg(&drv_tags->rqs[i], rq, NULL);  (in blk_mq_clear_rq_mapping())
    3326  kfree(tags->rqs);  (in blk_mq_free_rq_map())
    3327  tags->rqs = NULL;  (in blk_mq_free_rq_map())
    3377  tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),  (in blk_mq_alloc_rq_map())
    3380  if (!tags->rqs)  (in blk_mq_alloc_rq_map())
    3392  kfree(tags->rqs);  (in blk_mq_alloc_rq_map())
    3618  * tags->rqs[] for avoiding potential UAF.
    3633  cmpxchg(&tags->rqs[  (in blk_mq_clear_flush_rq_mapping())
    [all...]
blk-mq.h
    330  hctx->tags->rqs[rq->tag] = rq;  (in blk_mq_get_driver_tag())
/kernel/linux/linux-6.6/include/linux/
blk-mq.h
    743  struct request **rqs;  (member)
    748  * used to clear request reference in rqs[] before freeing one
    758  prefetch(tags->rqs[tag]);  (in blk_mq_tag_to_rq())
    759  return tags->rqs[tag];  (in blk_mq_tag_to_rq())
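In include/linux/blk-mq.h, blk_mq_tag_to_rq() is a plain index into rqs with a prefetch() of the request before returning it, so the caller's first dereference is more likely to hit cache. A userspace equivalent using the GCC/Clang builtin:

    #include <stdio.h>

    #define NR_TAGS 8

    struct request { int tag; };

    static struct request *rqs[NR_TAGS];

    /* Analog of blk_mq_tag_to_rq(): start pulling the request's
     * cache line while control is still returning to the caller. */
    static struct request *tag_to_rq(unsigned int tag)
    {
        if (tag >= NR_TAGS)
            return NULL;
        __builtin_prefetch(rqs[tag]);
        return rqs[tag];
    }

    int main(void)
    {
        static struct request r = { .tag = 5 };

        rqs[5] = &r;
        printf("tag 5 -> request %d\n", tag_to_rq(5)->tag);
        return 0;
    }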
/kernel/linux/linux-5.10/drivers/net/ethernet/stmicro/stmmac/
dwmac4_dma.c
    224  unsigned int rqs = fifosz / 256 - 1;  (in dwmac4_dma_rx_chan_op_mode(), local)
    247  mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;  (in dwmac4_dma_rx_chan_op_mode())
dwxgmac2_dma.c
    146  unsigned int rqs = fifosz / 256 - 1;  (in dwxgmac2_dma_rx_mode(), local)
    163  value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;  (in dwxgmac2_dma_rx_mode())
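dwmac4 and dwxgmac2 encode the RX FIFO size the same way in both trees: fifosz / 256 - 1 (the hardware counts 256-byte blocks, stored minus one), shifted into the RQS field of the RX operation-mode register. A standalone sketch of that encoding; the shift and mask values here are hypothetical, the real ones live in the dwmac4/dwxgmac2 register headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical field layout standing in for the real
     * MTL_OP_MODE_RQS / XGMAC_RQS shift and mask definitions. */
    #define RQS_SHIFT 16
    #define RQS_MASK  (0x3ffU << RQS_SHIFT)

    /* Encode a FIFO size in bytes into the RQS register field: the
     * hardware counts 256-byte blocks, stored as (blocks - 1). */
    static uint32_t encode_rqs(uint32_t reg, unsigned int fifosz)
    {
        unsigned int rqs = fifosz / 256 - 1;

        reg &= ~RQS_MASK;
        reg |= (rqs << RQS_SHIFT) & RQS_MASK;
        return reg;
    }

    int main(void)
    {
        /* 16 KiB FIFO -> 64 blocks -> field value 63 (0x3f) */
        printf("reg = 0x%08x\n", encode_rqs(0, 16384));
        return 0;
    }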
/kernel/linux/linux-6.6/drivers/net/ethernet/stmicro/stmmac/
dwxgmac2_dma.c
    150  unsigned int rqs = fifosz / 256 - 1;  (in dwxgmac2_dma_rx_mode(), local)
    167  value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;  (in dwxgmac2_dma_rx_mode())
dwmac4_dma.c
    258  unsigned int rqs = fifosz / 256 - 1;  (in dwmac4_dma_rx_chan_op_mode(), local)
    281  mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;  (in dwmac4_dma_rx_chan_op_mode())
/kernel/linux/linux-6.6/drivers/block/
virtio_blk.c
    473  req->mq_hctx->tags->rqs[req->tag] = req;  (in virtblk_prep_rq_batch())
/kernel/linux/linux-6.6/drivers/nvme/host/
pci.c
    927  req->mq_hctx->tags->rqs[req->tag] = req;  (in nvme_prep_rq_batch())
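virtio_blk and nvme use the same one-liner in their batch-prep hooks: before a request joins a polled submission batch, the driver publishes it in tags->rqs[] under its tag so completion-side lookups can find it. A minimal sketch of that publish-then-batch step, with hypothetical names:

    #include <stdio.h>

    #define NR_TAGS 8

    struct request { int tag; int prepped; };

    static struct request *rqs[NR_TAGS]; /* tag -> request table */

    /* Analog of virtblk_prep_rq_batch()/nvme_prep_rq_batch():
     * prepare a request and publish the tag mapping before it is
     * added to the submission batch. */
    static int prep_rq_batch(struct request *req)
    {
        req->prepped = 1;
        rqs[req->tag] = req;   /* completion path finds req by tag */
        return 1;              /* ready to be batched */
    }

    int main(void)
    {
        struct request reqs[2] = { { .tag = 0 }, { .tag = 1 } };

        for (int i = 0; i < 2; i++)
            prep_rq_batch(&reqs[i]);
        printf("tag 1 -> prepped=%d\n", rqs[1]->prepped);
        return 0;
    }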