/kernel/linux/linux-5.10/drivers/gpu/drm/mga/

mga_state.c
    634  buf_priv->dispatched = 1;        in mga_dma_dispatch_vertex()
    661  buf_priv->dispatched = 0;        in mga_dma_dispatch_vertex()
    681  buf_priv->dispatched = 1;        in mga_dma_dispatch_indices()
    707  buf_priv->dispatched = 0;        in mga_dma_dispatch_indices()
    759  buf_priv->dispatched = 0;        in mga_dma_dispatch_iload()
    890  if (buf_priv->dispatched == 1)   in mga_dma_vertex()
    892  buf_priv->dispatched = 0;        in mga_dma_vertex()
    925  if (buf_priv->dispatched == 1)   in mga_dma_indices()
    927  buf_priv->dispatched = 0;        in mga_dma_indices()

mga_drv.h
     89  int dispatched;                  member
    369  if ((buf_priv)->dispatched) { \

/kernel/linux/linux-5.10/block/

blk-mq-sched.c
    122  bool dispatched = false, busy = false;                         in __blk_mq_do_dispatch_sched() local
    183  dispatched |= blk_mq_dispatch_hctx_list(&rq_list);             in __blk_mq_do_dispatch_sched()
    186  dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);   in __blk_mq_do_dispatch_sched()
    191  return !!dispatched;                                           in __blk_mq_do_dispatch_sched()

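The __blk_mq_do_dispatch_sched() hits above (and the linux-6.6 copy further down) follow a common pattern: a local `dispatched` flag accumulates the result of one or more dispatch calls, and the function reports whether anything at all was handed to the driver. A minimal standalone sketch of that accumulation pattern, with hypothetical queue and dispatch helpers standing in for the real blk-mq ones:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for blk-mq's per-hctx request lists. */
static int pending[3] = { 2, 0, 5 };

/* Pretend to hand one list to the driver; return true if anything went out. */
static bool dispatch_list(int idx)
{
	bool sent = pending[idx] > 0;

	pending[idx] = 0;
	return sent;
}

/*
 * Accumulate the per-list results the way the hits above suggest:
 * dispatched |= ...; return !!dispatched;
 */
static bool do_dispatch_all(void)
{
	bool dispatched = false;
	int i;

	for (i = 0; i < 3; i++)
		dispatched |= dispatch_list(i);

	return !!dispatched;	/* true iff at least one list was dispatched */
}

int main(void)
{
	printf("dispatched anything: %d\n", do_dispatch_all());
	printf("dispatched anything: %d\n", do_dispatch_all());
	return 0;
}
```
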
bfq-iosched.c
    183  * When a sync request is dispatched, the queue that contains that
    869  * has no dispatched request. DO NOT use bfqq after the next   in bfq_weights_tree_remove()
    945  * rounds to actually get it dispatched.
   1622  return bfqq->dispatched == 0 &&   in bfq_bfqq_idle_for_long_time()
   1689  bfqq->dispatched == 0;   in bfq_bfqq_handle_idle_busy_switch()
   2037  * be set when rq will be dispatched.   in bfq_add_request()
   3068  } else /* no new rq dispatched, just reset the number of samples */   in bfq_reset_rate_computation()
   3193  * say exactly when a given dispatched request is served inside the
   3206  * which a certain set of requests is dispatched over a certain time
   3211  * property holds: the number of requests dispatched MUS
    [all...]

blk-mq-debugfs.c
    558  seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);              in hctx_dispatched_show()
    563  seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);               in hctx_dispatched_show()
    566  seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);  in hctx_dispatched_show()
    577  hctx->dispatched[i] = 0;                                           in hctx_dispatched_write()
    795  {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
    808  {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},

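Together with the `dispatched[BLK_MQ_MAX_DISPATCH_ORDER]` member listed under include/linux/blk-mq.h further down, the hctx_dispatched_show() hits suggest a per-hctx histogram of dispatch batch sizes keyed by power-of-two buckets (0, 1, 2, 4, ..., with the last bucket open-ended). A minimal userspace sketch of printing such a histogram, assuming the bucket layout the seq_printf format strings imply; the bucket count and sample data are made up:

```c
#include <stdio.h>

#define MAX_DISPATCH_ORDER 7	/* assumed bucket count for the sketch */

/* Hypothetical sample data: how often a batch of ~N requests was dispatched. */
static unsigned long dispatched[MAX_DISPATCH_ORDER] = { 3, 10, 7, 4, 2, 1, 1 };

static void dispatched_show(void)
{
	unsigned int d = 1, i;

	/* Bucket 0 is the "dispatched nothing" case. */
	printf("%8u\t%lu\n", 0U, dispatched[0]);

	/* Middle buckets double: 1, 2, 4, ... */
	for (i = 1; i < MAX_DISPATCH_ORDER - 1; i++) {
		printf("%8u\t%lu\n", d, dispatched[i]);
		d *= 2;
	}

	/* Last bucket is open-ended, marked with a '+'. */
	printf("%8u+\t%lu\n", 1U << (i - 1), dispatched[i]);
}

int main(void)
{
	dispatched_show();
	return 0;
}
```
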
bfq-wf2q.c
    864  * been dispatched at a rate equal to the estimated peak rate.
    868  * internal queueing. The reason is that I/O requests dispatched
   1689  if (!bfqq->dispatched)   in bfq_del_bfqq_busy()
   1705  if (!bfqq->dispatched)   in bfq_add_bfqq_busy()

bfq-iosched.h
    283  int dispatched;   member
    467  * this accounts for also requests already dispatched, but not
    522  /* number of requests dispatched and waiting for completion */
    569  * waited_rq when the request is finally dispatched.
    826  /* total disk time and nr sectors dispatched by this group */

blk-throttle.c
     48  * are dispatched upwards level by level until they reach the top and get
     50  * level, if the bios are dispatched into a single bio_list, there's a risk
     54  * To avoid such starvation, dispatched bios are queued separately
     55  * according to where they came from. When they are again dispatched to
     61  * throtl_service_queue and then dispatched in round-robin order.
     79  * Bios queued directly to this service_queue or dispatched from
    124  * dispatched from children. qnode_on_parent is used when bios are
    125  * dispatched from this throtl_grp into its parent and will compete
    154  /* Number of bytes dispatched in current slice */
    156  /* Number of bio's dispatched i
   1278  bool dispatched;   in throtl_pending_timer_fn() local
    [all...]

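The blk-throttle.c comment hits above describe the hierarchical dispatch scheme: throttled bios climb the hierarchy level by level, and to avoid one child starving the others, each level keeps the bios queued separately by origin and dispatches them in round-robin order. A minimal sketch of that round-robin drain over per-child queues, using toy data rather than bio lists:

```c
#include <stdio.h>

#define NCHILD 3

/*
 * Toy stand-in for the per-source queues the comments describe: each child
 * keeps its own FIFO, and the parent drains them in round-robin order so no
 * single child can starve the others.
 */
static int queue[NCHILD][4] = {
	{ 11, 12, 13, 14 },	/* child 0 has many pending items */
	{ 21, 0, 0, 0 },	/* child 1 has one */
	{ 31, 32, 0, 0 },	/* child 2 has two (0 == empty slot) */
};
static int head[NCHILD];

static int pop_from(int child)
{
	if (head[child] >= 4)
		return 0;	/* queue fully drained */
	return queue[child][head[child]++];	/* 0 also means empty */
}

int main(void)
{
	int progress = 1;

	/* Visit children in round-robin order until every queue is drained. */
	while (progress) {
		progress = 0;
		for (int c = 0; c < NCHILD; c++) {
			int v = pop_from(c);

			if (v) {
				printf("dispatch %d from child %d\n", v, c);
				progress++;
			}
		}
	}
	return 0;
}
```
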
/kernel/linux/linux-6.6/drivers/accel/ivpu/

ivpu_ipc.c
    365  bool dispatched;      in ivpu_ipc_irq_handler() local
    403  dispatched = false;   in ivpu_ipc_irq_handler()
    408  dispatched = true;    in ivpu_ipc_irq_handler()
    414  if (!dispatched) {    in ivpu_ipc_irq_handler()

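The ivpu_ipc_irq_handler() hits show another common use of a local `dispatched` flag: for each incoming message, walk the registered consumers, mark the message dispatched as soon as one of them claims it, and warn/drop it if nobody did. A minimal sketch of that claim-or-drop pattern, with made-up message and consumer types:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical message/consumer types for the sketch. */
struct msg { int channel; };
struct consumer { int channel; };

static struct consumer consumers[] = { { .channel = 1 }, { .channel = 4 } };

static void handle_incoming(struct msg *m)
{
	bool dispatched = false;

	/* Offer the message to every registered consumer. */
	for (size_t i = 0; i < sizeof(consumers) / sizeof(consumers[0]); i++) {
		if (consumers[i].channel == m->channel) {
			printf("consumer %zu took message on channel %d\n",
			       i, m->channel);
			dispatched = true;
			break;
		}
	}

	/* Nobody claimed it: log and drop, as the !dispatched branch suggests. */
	if (!dispatched)
		printf("dropping unexpected message on channel %d\n", m->channel);
}

int main(void)
{
	struct msg a = { .channel = 4 }, b = { .channel = 9 };

	handle_incoming(&a);
	handle_incoming(&b);
	return 0;
}
```
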
/kernel/linux/linux-6.6/block/

blk-mq-sched.c
     92  bool dispatched = false, busy = false;                         in __blk_mq_do_dispatch_sched() local
    167  dispatched |= blk_mq_dispatch_hctx_list(&rq_list);             in __blk_mq_do_dispatch_sched()
    170  dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);   in __blk_mq_do_dispatch_sched()
    175  return !!dispatched;                                           in __blk_mq_do_dispatch_sched()

bfq-iosched.c
    183  * When a sync request is dispatched, the queue that contains that
   1062  * rounds to actually get it dispatched.
   1753  return bfqq->dispatched == 0 &&   in bfq_bfqq_idle_for_long_time()
   1853  bfqq->dispatched == 0 &&   in bfq_bfqq_handle_idle_busy_switch()
   2055  if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)   in bfq_update_io_intensity()
   2270  * be set when rq will be dispatched.   in bfq_add_request()
   3441  } else /* no new rq dispatched, just reset the number of samples */   in bfq_reset_rate_computation()
   3566  * say exactly when a given dispatched request is served inside the
   3579  * which a certain set of requests is dispatched over a certain time
   3584  * property holds: the number of requests dispatched MUS
    [all...]

mq-deadline.c
     65  uint32_t dispatched;   member
     77  /* Position of the most recently dispatched request. */
     89  /* Data direction of latest dispatched request. */
    175  * is below that of the most recently dispatched request. Hence, for   in deadline_from_pos()
    360  * Look for a write request that can be dispatched, that is one with   in deadline_fifo_request()
    405  * Look for a write request that can be dispatched, that is one with   in deadline_next_request()
    530  * them can be dispatched, rq will be NULL.   in __dd_dispatch_request()
    551  dd->per_prio[prio].stats.dispatched++;   in __dd_dispatch_request()
    683  stats->dispatched, atomic_read(&stats->completed));   in dd_exit_sched()
   1148  return stats->dispatched   in dd_owned_by_driver()
    [all...]

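The mq-deadline hits above show per-priority I/O statistics: a `dispatched` counter bumped in __dd_dispatch_request() and reported next to a `completed` counter, with dd_owned_by_driver() apparently deriving from `stats->dispatched` whether requests are still in flight (the exact expression is cut off above). A minimal sketch of that kind of dispatched-vs-completed accounting, under the stated assumption that "owned by driver" simply means more dispatched than completed:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-priority I/O statistics, loosely modelled on the hits above. */
struct io_stats {
	uint32_t dispatched;	/* requests handed to the driver */
	uint32_t completed;	/* requests the driver finished */
};

static void dispatch_one(struct io_stats *stats)
{
	stats->dispatched++;
}

static void complete_one(struct io_stats *stats)
{
	stats->completed++;
}

/* Assumption for this sketch: in flight == dispatched minus completed. */
static bool owned_by_driver(const struct io_stats *stats)
{
	return stats->dispatched > stats->completed;
}

int main(void)
{
	struct io_stats stats = { 0 };

	dispatch_one(&stats);
	dispatch_one(&stats);
	complete_one(&stats);
	printf("dispatched=%u completed=%u owned_by_driver=%d\n",
	       stats.dispatched, stats.completed, owned_by_driver(&stats));
	return 0;
}
```
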
bfq-wf2q.c
    827  * been dispatched at a rate equal to the estimated peak rate.
    831  * internal queueing. The reason is that I/O requests dispatched
   1661  if (!bfqq->dispatched) {   in bfq_del_bfqq_busy()
   1685  if (!bfqq->dispatched) {   in bfq_add_bfqq_busy()

bfq-iosched.h
    299  int dispatched;   member
    535  * this accounts for also requests already dispatched, but not
    591  /* number of requests dispatched and waiting for completion */
    594  * number of requests dispatched and waiting for completion
    646  * waited_rq when the request is finally dispatched.
    933  /* total disk time and nr sectors dispatched by this group */

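In bfq, `dispatched` is a counter rather than a flag: per the bfq-iosched.h comments it counts requests already dispatched and still waiting for completion, and the bfq-wf2q.c / bfq-iosched.c hits only treat a queue as idle once it drops to zero. A minimal sketch of such an in-flight counter gating an idleness check, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy queue: queued = requests still in the scheduler, dispatched = in flight. */
struct toy_queue {
	int queued;
	int dispatched;	/* dispatched and waiting for completion */
};

static void dispatch_request(struct toy_queue *q)
{
	if (q->queued > 0) {
		q->queued--;
		q->dispatched++;
	}
}

static void complete_request(struct toy_queue *q)
{
	if (q->dispatched > 0)
		q->dispatched--;
}

/*
 * Mirror the "bfqq->dispatched == 0" checks in the hits above: the queue only
 * counts as idle when nothing is queued *and* nothing is still in flight.
 */
static bool queue_is_idle(const struct toy_queue *q)
{
	return q->queued == 0 && q->dispatched == 0;
}

int main(void)
{
	struct toy_queue q = { .queued = 1 };

	dispatch_request(&q);
	printf("idle after dispatch: %d\n", queue_is_idle(&q));	/* 0: in flight */
	complete_request(&q);
	printf("idle after completion: %d\n", queue_is_idle(&q));	/* 1 */
	return 0;
}
```
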
blk-throttle.c
    753  * A bio has been dispatched. Also adjust slice_end. It might happen   in throtl_trim_slice()
    755  * slice_end, but later limit was bumped up and bio was dispatched   in throtl_trim_slice()
    899  * of jiffies to wait before this bio is with-in IO rate and can be dispatched
    997  * dispatched. Mark that @tg was empty. This is automatically   in throtl_add_bio_tg()
   1162  * the first child throtl_grp should be dispatched. This function
   1178  bool dispatched;   in throtl_pending_timer_fn() local
   1197  dispatched = false;   in throtl_pending_timer_fn()
   1207  dispatched = true;    in throtl_pending_timer_fn()
   1219  if (!dispatched)      in throtl_pending_timer_fn()
   1359  * account recently dispatched I   in tg_conf_updated()
    [all...]

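The throtl_pending_timer_fn() hits (the same shape appears in both kernel trees) show the flag driving a retry loop: each timer run tries to dispatch queued bios, records in `dispatched` whether anything moved, and stops or re-arms once a pass achieves nothing. A minimal sketch of that "keep going while progress is being made" loop, with invented helpers standing in for the throttling machinery:

```c
#include <stdbool.h>
#include <stdio.h>

static int queued_bios = 3;	/* pretend three bios are throttled */
static int budget = 2;		/* pretend the current slice allows two */

/* Try to dispatch one throttled bio; return true if one actually went out. */
static bool try_dispatch_one(void)
{
	if (queued_bios > 0 && budget > 0) {
		queued_bios--;
		budget--;
		return true;
	}
	return false;
}

/* One run of a (simulated) pending timer, modelled on the hits above. */
static void pending_timer_fn(void)
{
	bool dispatched;

	do {
		dispatched = false;
		if (try_dispatch_one())
			dispatched = true;
	} while (dispatched);

	if (queued_bios)
		printf("out of budget, %d bio(s) still queued; would re-arm timer\n",
		       queued_bios);
	else
		printf("all queued bios dispatched\n");
}

int main(void)
{
	pending_timer_fn();
	return 0;
}
```
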
/kernel/linux/linux-5.10/drivers/gpu/drm/r128/

r128_state.c
    588  buf_priv->dispatched = 1;             in r128_cce_dispatch_vertex()
    630  /* FIXME: Check dispatched field */   in r128_cce_dispatch_vertex()
    631  buf_priv->dispatched = 0;             in r128_cce_dispatch_vertex()
    663  buf_priv->dispatched = 1;             in r128_cce_dispatch_indirect()
    688  /* FIXME: Check dispatched field */   in r128_cce_dispatch_indirect()
    689  buf_priv->dispatched = 0;             in r128_cce_dispatch_indirect()
    715  buf_priv->dispatched = 1;             in r128_cce_dispatch_indices()
    768  /* FIXME: Check dispatched field */   in r128_cce_dispatch_indices()
    769  buf_priv->dispatched = 0;             in r128_cce_dispatch_indices()

r128_drv.h
    140  int dispatched;   member

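The legacy mga and r128 DRM hits both keep a per-buffer `dispatched` flag in the buffer's private data: it is set while the buffer's commands are being emitted and cleared once emission is finished, and other paths check it before reusing or discarding the buffer. A minimal sketch of that per-buffer state flag, with hypothetical emit/discard helpers; the real drivers' emission and aging logic is not reproduced here:

```c
#include <stdbool.h>
#include <stdio.h>

/* Per-buffer private data, loosely modelled on mga_drv.h / r128_drv.h. */
struct buf_priv {
	int dispatched;	/* non-zero while the buffer's commands are being emitted */
};

static void dispatch_buffer(struct buf_priv *buf_priv)
{
	buf_priv->dispatched = 1;
	/* ... emit the buffer's commands to the hardware ring here ... */
	buf_priv->dispatched = 0;
}

static void discard_buffer(struct buf_priv *buf_priv)
{
	/* Mirror the "if (buf_priv->dispatched == 1)" checks in the hits. */
	if (buf_priv->dispatched == 1) {
		printf("buffer still being dispatched, not discarding yet\n");
		return;
	}
	printf("buffer discarded\n");
}

int main(void)
{
	struct buf_priv buf = { 0 };

	dispatch_buffer(&buf);
	discard_buffer(&buf);
	return 0;
}
```
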
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/

scheduler.h
     84  /* if this workload has been dispatched to i915? */
     85  bool dispatched;   member

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/

scheduler.h
     89  /* if this workload has been dispatched to i915? */
     90  bool dispatched;   member

/kernel/linux/linux-6.6/arch/arm/nwfpe/

entry.S
    120  @ Only FPE instructions are dispatched here, everything else

/kernel/linux/linux-5.10/include/linux/

blk-mq.h
     23  * dispatched to the hardware but for some reason (e.g. lack of
    115  * assigned when a request is dispatched from a hardware queue.
    127  /** @run: Number of dispatched requests. */
    130  /** @dispatched: Number of dispatch requests by queue. */
    131  unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];   member

/kernel/linux/linux-5.10/arch/m68k/fpsp040/

decbin.S
     21  | and NaN operands are dispatched without entering this routine)

/kernel/linux/linux-6.6/arch/m68k/fpsp040/

decbin.S
     21  | and NaN operands are dispatched without entering this routine)

/kernel/linux/linux-5.10/fs/ocfs2/dlm/

dlmrecovery.c
   1715  int dispatched = 0;   in dlm_master_requery_handler() local
   1742  dispatched = 1;       in dlm_master_requery_handler()
   1754  if (!dispatched)      in dlm_master_requery_handler()

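The dlm_master_requery_handler() hits (identical shape in both kernel trees) use `dispatched` to record whether the request was handed off to a worker: if it was, the worker takes over responsibility, and only the `!dispatched` path cleans up inline. A minimal sketch of that hand-off-or-clean-up-here pattern, with an invented work-queue helper; what the real cleanup does is not shown in the hits and is not reproduced here:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work { int id; };

/* Pretend to queue work for a worker thread; fails for odd ids in this sketch. */
static bool queue_work_item(struct work *w)
{
	if (w->id % 2 == 0) {
		printf("work %d queued; worker will release it\n", w->id);
		free(w);	/* stands in for the worker consuming the item */
		return true;
	}
	return false;
}

static void handle_request(int id)
{
	struct work *w = malloc(sizeof(*w));
	int dispatched = 0;

	if (!w)
		return;
	w->id = id;

	if (queue_work_item(w))
		dispatched = 1;

	/* Only clean up here if the work was NOT handed to a worker. */
	if (!dispatched) {
		printf("work %d not dispatched; releasing it inline\n", id);
		free(w);
	}
}

int main(void)
{
	handle_request(2);
	handle_request(3);
	return 0;
}
```
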
/kernel/linux/linux-6.6/fs/ocfs2/dlm/

dlmrecovery.c
   1706  int dispatched = 0;   in dlm_master_requery_handler() local
   1733  dispatched = 1;       in dlm_master_requery_handler()
   1745  if (!dispatched)      in dlm_master_requery_handler()
