Searched refs:rq_flags (Results 1 - 25 of 128) sorted by relevance

/kernel/linux/linux-5.10/block/
blk-pm.h:21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
29 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
38 if (q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_add_request()
46 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_put_request()
blk-flush.c:130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
330 flush_rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_kick_flush()
336 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
436 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_insert_flush()
blk-zoned.c:92 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in blk_req_zone_write_trylock()
93 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in blk_req_zone_write_trylock()
105 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in __blk_req_zone_write_lock()
106 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_lock()
112 rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_unlock()
blk-core.c:245 if (unlikely(rq->rq_flags & RQF_QUIET)) in req_bio_endio()
262 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
1227 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in blk_rq_err_bytes()
1285 !(req->rq_flags & RQF_FLUSH_SEQ)) { in blk_account_io_done()
1445 !(req->rq_flags & RQF_QUIET))) in blk_update_request()
1489 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_request()
1494 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { in blk_update_request()
1625 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { in blk_rq_prep_clone()
1626 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in blk_rq_prep_clone()
blk-mq.h:232 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
233 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
blk-mq.c:289 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
310 rq->rq_flags = 0; in blk_mq_rq_ctx_init()
313 rq->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
315 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
355 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_rq_ctx_init()
531 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
541 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
562 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
759 rq->rq_flags |= RQF_STATS; in blk_mq_start_request()
786 rq->rq_flags in __blk_mq_requeue_request()
[all...]
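
All of the block-layer hits above touch the same field: struct request::rq_flags, a plain req_flags_t bitmask of RQF_* flags. It is read and updated with ordinary bitwise operators rather than atomics, because a request is owned by a single context at any point in its lifetime. A minimal sketch of the test/set/clear idiom (illustrative only; example_flag_idiom() is not a function from the tree):

#include <linux/blkdev.h>	/* struct request, req_flags_t, RQF_* in 5.10 */

static void example_flag_idiom(struct request *rq)
{
	if (rq->rq_flags & RQF_QUIET)		/* test a flag */
		return;
	rq->rq_flags |= RQF_FLUSH_SEQ;		/* set a flag */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;		/* clear it again */
}
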
/kernel/linux/linux-6.6/block/
blk-mq-sched.h:40 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_allow_merge()
51 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_completed_request()
61 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_requeue_request()
blk-mq.c:362 data->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
364 data->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
365 rq->rq_flags = data->rq_flags; in blk_mq_rq_ctx_init()
367 if (data->rq_flags & RQF_SCHED_TAGS) { in blk_mq_rq_ctx_init()
392 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_rq_ctx_init()
455 data->rq_flags |= RQF_SCHED_TAGS; in __blk_mq_alloc_requests()
467 data->rq_flags |= RQF_USE_SCHED; in __blk_mq_alloc_requests()
476 if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests()
480 data->rq_flags | in __blk_mq_alloc_requests()
[all...]
blk-mq.h:152 req_flags_t rq_flags; member
224 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
308 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
309 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
blk-flush.c:135 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
334 flush_rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_kick_flush()
340 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
393 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_rq_init_flush()
blk-zoned.c:74 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in blk_req_zone_write_trylock()
75 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in blk_req_zone_write_trylock()
87 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in __blk_req_zone_write_lock()
88 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_lock()
94 rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_unlock()
blk-pm.h:21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
blk-merge.c:572 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in __blk_rq_map_sg()
738 if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
751 rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
770 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_mixed_merge()
847 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
/kernel/linux/linux-6.6/include/linux/
blk-mq.h:86 req_flags_t rq_flags; member
835 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); in blk_mq_need_time_stamp()
840 return rq->rq_flags & RQF_RESV; in blk_mq_is_reserved_rq()
855 if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror || in blk_mq_add_to_batch()
1073 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
1084 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
1123 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
1184 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) in blk_req_zone_write_unlock()
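
The include/linux/blk-mq.h hits show the consumer side: small inline helpers that key their result off rq_flags. As one example, blk_rq_payload_bytes() (line 1073 above) substitutes the driver-supplied special payload for the bio payload when RQF_SPECIAL_PAYLOAD is set; in 6.6 it looks roughly like this (body reconstructed, not copied from the tree):

static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;	/* payload set up by the driver */
	return blk_rq_bytes(rq);		/* normal bio payload */
}
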
/kernel/linux/linux-5.10/kernel/sched/
sched.h:1331 struct rq_flags { struct
1350 * copy of the (on-stack) 'struct rq_flags rf'.
1354 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
1364 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1374 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1386 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1389 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1393 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1401 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1411 rq_lock_irqsave(struct rq *rq, struct rq_flags *r
[all...]
core.c:189 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
213 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
358 struct rq_flags rf; in hrtick()
386 struct rq_flags rf; in __hrtick_start()
1077 struct rq_flags rf; in uclamp_update_util_min_rt_default()
1356 struct rq_flags rf; in uclamp_update_active()
1798 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
1840 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
1863 struct rq_flags rf; in migration_cpu_stop()
1950 struct rq_flags r in __set_cpus_allowed_ptr()
[all...]
/kernel/linux/linux-6.6/kernel/sched/
sched.h:1712 struct rq_flags { struct
1733 * copy of the (on-stack) 'struct rq_flags rf'.
1737 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
1750 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1760 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1772 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1775 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1779 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1787 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1797 rq_lock_irqsave(struct rq *rq, struct rq_flags *r
[all...]
core.c:633 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
657 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
796 struct rq_flags rf; in hrtick()
824 struct rq_flags rf; in __hrtick_start()
1532 struct rq_flags rf; in uclamp_update_util_min_rt_default()
1784 struct rq_flags rf; in uclamp_update_active()
2337 struct rq_flags rf; in wait_task_inactive()
2565 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
2620 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
2644 struct rq_flags r in migration_cpu_stop()
[all...]
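
The kernel/sched hits (the same in shape across 5.10 and 6.6) are an unrelated type that merely shares the name: struct rq_flags is an on-stack cookie that task_rq_lock() fills with the saved IRQ state and the lockdep pin for the runqueue lock. A hedged sketch of the usage pattern the prototypes above imply (example_peek_task() is hypothetical):

static void example_peek_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* lock p's runqueue and pin it via rf */
	/* ... p cannot change runqueue while the pinned lock is held ... */
	task_rq_unlock(rq, p, &rf);	/* unpin, unlock, restore IRQ flags */
}
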
/kernel/linux/linux-5.10/drivers/ide/
ide-cd.c:103 if (!sense || !rq || (rq->rq_flags & RQF_QUIET)) in cdrom_log_sense()
304 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
324 !(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
359 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
368 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
375 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
393 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
435 req_flags_t rq_flags) in ide_cd_queue_pc()
443 "rq_flags: 0x%x", in ide_cd_queue_pc()
444 cmd[0], write, timeout, rq_flags); in ide_cd_queue_pc()
432 ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, int write, void *buffer, unsigned *bufflen, struct scsi_sense_hdr *sshdr, int timeout, req_flags_t rq_flags) ide_cd_queue_pc() argument
[all...]
ide-io.c:330 rq->rq_flags |= RQF_FAILED; in start_request()
466 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { in ide_issue_rq()
467 rq->rq_flags |= RQF_DONTPREP; in ide_issue_rq()
521 (rq->rq_flags & RQF_PM) == 0) { in ide_issue_rq()
/kernel/linux/linux-5.10/net/sunrpc/
svc.c:612 __set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_rqst_alloc()
697 set_bit(RQ_VICTIM, &rqstp->rq_flags); in choose_victim()
864 if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags)) in svc_exit_thread()
1167 set_bit(RQ_AUTHERR, &rqstp->rq_flags); in svc_return_autherr()
1175 if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_get_autherr()
1200 test_bit(RQ_DROPME, &rqstp->rq_flags)) in svc_generic_dispatch()
1203 if (test_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_generic_dispatch()
1294 set_bit(RQ_SPLICE_OK, &rqstp->rq_flags); in svc_process_common()
1296 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); in svc_process_common()
1297 clear_bit(RQ_DROPME, &rqstp->rq_flags); in svc_process_common()
[all...]
svc_xprt.c:362 if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
366 set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
374 if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
438 if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_xprt_do_enqueue()
561 if (test_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_wake_up()
729 clear_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
739 set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
1175 if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
1205 set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
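
A third namesake appears in net/sunrpc: struct svc_rqst::rq_flags is an unsigned long holding bit numbers (RQ_BUSY, RQ_DATA, RQ_DROPME, ...) rather than a mask, and it is manipulated with the atomic bitop helpers because several server threads can race on the same rqstp. The idiom, sketched (example_rqst_bits() is hypothetical):

#include <linux/sunrpc/svc.h>	/* struct svc_rqst, RQ_* bit numbers */

static void example_rqst_bits(struct svc_rqst *rqstp)
{
	set_bit(RQ_DROPME, &rqstp->rq_flags);		/* atomic set */
	if (test_and_clear_bit(RQ_DROPME, &rqstp->rq_flags)) {
		/* only one thread observes the bit set and clears it */
	}
}
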
/kernel/linux/linux-5.10/drivers/scsi/
scsi_lib.c:156 if (cmd->request->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
157 cmd->request->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
234 * @rq_flags: flags for ->rq_flags
243 int timeout, int retries, u64 flags, req_flags_t rq_flags, in __scsi_execute()
253 rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0); in __scsi_execute()
267 req->rq_flags |= rq_flags | RQF_QUIET; in __scsi_execute()
806 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
895 else if (req->rq_flags in scsi_io_completion_nz_result()
240 __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, struct scsi_sense_hdr *sshdr, int timeout, int retries, u64 flags, req_flags_t rq_flags, int *resid) __scsi_execute() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/fungible/funcore/
fun_queue.h:69 u16 rq_flags; member
120 u16 rq_flags; member
/kernel/linux/linux-6.6/drivers/scsi/
scsi_lib.c:118 if (rq->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
119 rq->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
233 req->rq_flags |= RQF_QUIET; in scsi_execute_cmd()
641 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in scsi_rq_err_bytes()
819 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
910 else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
1150 if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
1152 rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
1231 if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) in scsi_device_state_check()
1239 if (req && !(req->rq_flags in scsi_device_state_check()
[all...]
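
Finally, the drivers/ide and drivers/scsi hits illustrate the RQF_DONTPREP convention: the driver sets the flag once a request has been prepared, so a requeue path can detect the prepared state and clear it to force re-preparation. A hedged sketch of the two halves, mirroring ide_issue_rq() and scsi_mq_requeue_cmd() above (both example_* functions are hypothetical):

static void example_issue(struct request *rq)
{
	if (!(rq->rq_flags & RQF_DONTPREP)) {
		/* first pass: set up driver-private state, then mark it done */
		rq->rq_flags |= RQF_DONTPREP;
	}
	/* ... hand the request to the hardware ... */
}

static void example_requeue(struct request *rq)
{
	rq->rq_flags &= ~RQF_DONTPREP;	/* next issue re-runs preparation */
}
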
