Home
last modified time | relevance | path

Searched refs:rq_list (Results 1 - 25 of 39) sorted by relevance

12

/kernel/linux/linux-5.10/block/
H A Dblk-mq-sched.c86 static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list) in blk_mq_dispatch_hctx_list() argument
89 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
94 list_for_each_entry(rq, rq_list, queuelist) { in blk_mq_dispatch_hctx_list()
96 list_cut_before(&hctx_list, rq_list, &rq->queuelist); in blk_mq_dispatch_hctx_list()
101 list_splice_tail_init(rq_list, &hctx_list); in blk_mq_dispatch_hctx_list()
124 LIST_HEAD(rq_list); in __blk_mq_do_dispatch_sched()
165 list_add_tail(&rq->queuelist, &rq_list); in __blk_mq_do_dispatch_sched()
181 list_sort(NULL, &rq_list, sched_rq_cmp); in __blk_mq_do_dispatch_sched()
183 dispatched |= blk_mq_dispatch_hctx_list(&rq_list); in __blk_mq_do_dispatch_sched()
184 } while (!list_empty(&rq_list)); in __blk_mq_do_dispatch_sched()
[all...]
H A Dkyber-iosched.c143 * Used to ensure operations on rq_list and kcq_map to be an atomic one.
144 * Also protect the rqs on rq_list when merge.
147 struct list_head rq_list[KYBER_NUM_DOMAINS]; member
445 INIT_LIST_HEAD(&kcq->rq_list[i]); in kyber_ctx_queue_init()
569 struct list_head *rq_list = &kcq->rq_list[sched_domain]; in kyber_bio_merge() local
573 merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); in kyber_bio_merge()
585 struct list_head *rq_list, bool at_head) in kyber_insert_requests()
590 list_for_each_entry_safe(rq, next, rq_list, queuelist) { in kyber_insert_requests()
593 struct list_head *head = &kcq->rq_list[sched_domai in kyber_insert_requests()
584 kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, bool at_head) kyber_insert_requests() argument
[all...]
H A Dblk-mq.c805 LIST_HEAD(rq_list); in blk_mq_requeue_work()
809 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_requeue_work()
812 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
829 while (!list_empty(&rq_list)) { in blk_mq_requeue_work()
830 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
1938 struct list_head rq_list; in blk_mq_flush_plug_list() local
1953 list_cut_before(&rq_list, &list, pos); in blk_mq_flush_plug_list()
1955 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list, in blk_mq_flush_plug_list()
2601 * 'cpu' is going away. splice any existing rq_list entries from this
/kernel/linux/linux-6.6/block/
H A Dblk-mq-sched.c56 static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list) in blk_mq_dispatch_hctx_list() argument
59 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
64 list_for_each_entry(rq, rq_list, queuelist) { in blk_mq_dispatch_hctx_list()
66 list_cut_before(&hctx_list, rq_list, &rq->queuelist); in blk_mq_dispatch_hctx_list()
71 list_splice_tail_init(rq_list, &hctx_list); in blk_mq_dispatch_hctx_list()
94 LIST_HEAD(rq_list); in __blk_mq_do_dispatch_sched()
139 list_add_tail(&rq->queuelist, &rq_list); in __blk_mq_do_dispatch_sched()
165 list_sort(NULL, &rq_list, sched_rq_cmp); in __blk_mq_do_dispatch_sched()
167 dispatched |= blk_mq_dispatch_hctx_list(&rq_list); in __blk_mq_do_dispatch_sched()
168 } while (!list_empty(&rq_list)); in __blk_mq_do_dispatch_sched()
[all...]
H A Dkyber-iosched.c143 * Used to ensure operations on rq_list and kcq_map to be an atomic one.
144 * Also protect the rqs on rq_list when merge.
147 struct list_head rq_list[KYBER_NUM_DOMAINS]; member
450 INIT_LIST_HEAD(&kcq->rq_list[i]); in kyber_ctx_queue_init()
575 struct list_head *rq_list = &kcq->rq_list[sched_domain]; in kyber_bio_merge() local
579 merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); in kyber_bio_merge()
591 struct list_head *rq_list, in kyber_insert_requests()
597 list_for_each_entry_safe(rq, next, rq_list, queuelist) { in kyber_insert_requests()
600 struct list_head *head = &kcq->rq_list[sched_domai in kyber_insert_requests()
590 kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, blk_insert_t flags) kyber_insert_requests() argument
[all...]
/kernel/linux/linux-5.10/drivers/scsi/
H A Dsg.c144 struct list_head rq_list; /* head of request list */ member
862 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_fill_request_table()
1000 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_ioctl_common()
1012 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_ioctl_common()
1202 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_poll()
2098 list_for_each_entry(resp, &sfp->rq_list, entry) { in sg_get_rq_mark()
2128 if (!list_empty(&sfp->rq_list)) { in sg_add_request()
2142 list_add_tail(&rp->entry, &sfp->rq_list); in sg_add_request()
2157 if (!sfp || !srp || list_empty(&sfp->rq_list)) in sg_remove_request()
2191 INIT_LIST_HEAD(&sfp->rq_list); in sg_add_sfp()
[all...]
/kernel/linux/linux-6.6/drivers/scsi/
H A Dsg.c149 struct list_head rq_list; /* head of request list */ member
864 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_fill_request_table()
1002 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_ioctl_common()
1014 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_ioctl_common()
1184 list_for_each_entry(srp, &sfp->rq_list, entry) { in sg_poll()
2078 list_for_each_entry(resp, &sfp->rq_list, entry) { in sg_get_rq_mark()
2108 if (!list_empty(&sfp->rq_list)) { in sg_add_request()
2122 list_add_tail(&rp->entry, &sfp->rq_list); in sg_add_request()
2137 if (!sfp || !srp || list_empty(&sfp->rq_list)) in sg_remove_request()
2171 INIT_LIST_HEAD(&sfp->rq_list); in sg_add_sfp()
[all...]
/kernel/linux/linux-5.10/drivers/block/paride/
H A Dpd.c240 struct list_head rq_list; member
411 if (list_empty(&disk->rq_list)) in set_next_request()
414 pd_req = list_first_entry(&disk->rq_list, in set_next_request()
765 list_add_tail(&bd->rq->queuelist, &disk->rq_list); in pd_queue_rq()
956 INIT_LIST_HEAD(&disk->rq_list); in pd_detect()
H A Dpcd.c204 struct list_head rq_list; member
325 INIT_LIST_HEAD(&cd->rq_list); in pcd_init_units()
783 if (cd->present && !list_empty(&cd->rq_list)) { in set_next_request()
784 pcd_req = list_first_entry(&cd->rq_list, struct request, in set_next_request()
827 list_add_tail(&bd->rq->queuelist, &cd->rq_list); in pcd_queue_rq()
H A Dpf.c243 struct list_head rq_list; member
309 INIT_LIST_HEAD(&pf->rq_list); in pf_init_units()
816 if (pf->present && !list_empty(&pf->rq_list)) { in set_next_request()
817 pf_req = list_first_entry(&pf->rq_list, struct request, in set_next_request()
878 list_add_tail(&bd->rq->queuelist, &pf->rq_list); in pf_queue_rq()
/kernel/linux/linux-5.10/include/linux/mtd/
H A Dblktrans.h33 struct list_head rq_list; member
/kernel/linux/linux-6.6/include/linux/mtd/
H A Dblktrans.h33 struct list_head rq_list; member
/kernel/linux/linux-6.6/drivers/mtd/
H A Dmtd_blkdevs.c110 rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist); in mtd_next_request()
178 list_add_tail(&bd->rq->queuelist, &dev->rq_list); in mtd_queue_rq()
369 INIT_LIST_HEAD(&new->rq_list); in add_mtd_blktrans_dev()
/kernel/linux/linux-5.10/drivers/mtd/
H A Dmtd_blkdevs.c130 rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist); in mtd_next_request()
197 list_add_tail(&bd->rq->queuelist, &dev->rq_list); in mtd_queue_rq()
424 INIT_LIST_HEAD(&new->rq_list); in add_mtd_blktrans_dev()
/kernel/linux/linux-5.10/drivers/block/
H A Dxsysace.c210 struct list_head rq_list; member
473 return !list_empty(&ace->rq_list); in ace_has_next_request()
482 rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist); in ace_get_next_request()
881 list_add_tail(&req->queuelist, &ace->rq_list); in ace_queue_rq()
991 INIT_LIST_HEAD(&ace->rq_list); in ace_setup()
/kernel/linux/linux-5.10/drivers/block/aoe/
H A Daoe.h173 struct list_head rq_list; member
H A Daoeblk.c278 list_add_tail(&bd->rq->queuelist, &d->rq_list); in aoeblk_queue_rq()
H A Daoedev.c475 INIT_LIST_HEAD(&d->rq_list); in aoedev_by_aoeaddr()
/kernel/linux/linux-6.6/drivers/block/aoe/
H A Daoe.h173 struct list_head rq_list; member
/kernel/linux/linux-5.10/net/sunrpc/
H A Dxprt.c1661 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); in xprt_alloc_slot()
1662 list_del(&req->rq_list); in xprt_alloc_slot()
1699 list_add(&req->rq_list, &xprt->free); in xprt_free_slot()
1709 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); in xprt_free_all_slots()
1710 list_del(&req->rq_list); in xprt_free_all_slots()
1733 list_add(&req->rq_list, &xprt->free); in xprt_alloc()
/kernel/linux/linux-6.6/net/sunrpc/
H A Dxprt.c1726 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); in xprt_alloc_slot()
1727 list_del(&req->rq_list); in xprt_alloc_slot()
1764 list_add(&req->rq_list, &xprt->free); in xprt_free_slot()
1774 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); in xprt_free_all_slots()
1775 list_del(&req->rq_list); in xprt_free_all_slots()
1823 list_add(&req->rq_list, &xprt->free); in xprt_alloc()
/kernel/linux/linux-5.10/drivers/ide/
H A Dide-io.c545 list_add(&rq->queuelist, &drive->rq_list); in ide_issue_rq()
901 list_add_tail(&rq->queuelist, &drive->rq_list); in ide_insert_request_head()
H A Dide-probe.c1170 while (!list_empty(&drive->rq_list)) { in drive_rq_insert_work()
1171 rq = list_first_entry(&drive->rq_list, struct request, queuelist); in drive_rq_insert_work()
1219 INIT_LIST_HEAD(&drive->rq_list); in ide_port_init_devices_data()
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
H A Dhns_roce_cq.c273 INIT_LIST_HEAD(&hr_cq->rq_list); in hns_roce_create_cq()
/kernel/linux/linux-5.10/include/linux/sunrpc/
H A Dxprt.h85 struct list_head rq_list; /* Slot allocation list */ member

Completed in 44 milliseconds

12