/kernel/linux/linux-5.10/net/netfilter/ipvs/
  ip_vs_sched.c: IPVS scheduler list. Matches in ip_vs_bind_scheduler() (calls scheduler->init_service(svc), then publishes the scheduler with rcu_assign_pointer(svc->scheduler, scheduler)), in ip_vs_unbind_scheduler() (fetches the bound scheduler with rcu_dereference_protected(svc->scheduler, 1)), and in the definitions of ip_vs_scheduler_put(), register_ip_vs_scheduler() and unregister_ip_vs_scheduler().
/kernel/linux/linux-6.6/net/netfilter/ipvs/
  ip_vs_sched.c: same scheduler bind/unbind and register/unregister matches as the linux-5.10 copy; a registration sketch follows.
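The ip_vs_sched.c matches cover the whole life cycle of an IPVS scheduler module: the module registers an ip_vs_scheduler object, the core binds it to a service (running init_service and publishing it with rcu_assign_pointer()), and unregisters it on unload. Below is a minimal registration sketch modeled on the in-tree ip_vs_lc/ip_vs_rr modules; the "first" scheduler name and its pick-the-first-usable-destination policy are made up for illustration.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <net/ip_vs.h>

    /* Hypothetical policy: hand every new connection to the first
     * destination that is neither overloaded nor weighted to zero. */
    static struct ip_vs_dest *
    ip_vs_first_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                         struct ip_vs_iphdr *iph)
    {
        struct ip_vs_dest *dest;

        list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
            if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
                atomic_read(&dest->weight) > 0)
                return dest;
        }
        ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
    }

    static struct ip_vs_scheduler ip_vs_first_scheduler = {
        .name     = "first",          /* hypothetical scheduler name */
        .refcnt   = ATOMIC_INIT(0),
        .module   = THIS_MODULE,
        .n_list   = LIST_HEAD_INIT(ip_vs_first_scheduler.n_list),
        .schedule = ip_vs_first_schedule,
    };

    static int __init ip_vs_first_init(void)
    {
        return register_ip_vs_scheduler(&ip_vs_first_scheduler);
    }

    static void __exit ip_vs_first_cleanup(void)
    {
        unregister_ip_vs_scheduler(&ip_vs_first_scheduler);
    }

    module_init(ip_vs_first_init);
    module_exit(ip_vs_first_cleanup);
    MODULE_LICENSE("GPL");

With such a module loaded, pointing a virtual service at the (hypothetical) "first" scheduler, e.g. via ipvsadm -s, is what ends up invoking ip_vs_bind_scheduler() on it.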
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/ |
  sched_policy.c: try_to_schedule_next_vgpu() takes a local pointer to &gvt->scheduler, clears scheduler->next_vgpu when it already equals current_vgpu (so the policy can choose again), sets scheduler->need_reschedule, checks scheduler->current_workload[engine->id] for in-flight work, and calls vgpu_update_timeslice() on the current vgpu; further local scheduler pointers appear in tbs_sched_func(), tbs_sched_init(), tbs_sched_clean() and intel_vgpu_stop_schedule().
  scheduler.c: shadow_context_status_change() takes scheduler->mmio_context_lock with spin_lock_irqsave(), switches MMIO state away from scheduler->engine_owner[ring_id] via intel_gvt_switch_mmio() and clears that owner, then looks up scheduler->current_workload[ring_id]; further local scheduler pointers appear in pick_next_workload(), complete_current_workload(), workload_thread(), intel_gvt_wait_vgpu_idle(), intel_gvt_clean_workload_scheduler() and intel_gvt_init_workload_scheduler(). A simplified sketch of the locking idiom follows.
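The mmio_context_lock matches show a small ownership-handoff idiom: the per-engine MMIO owner is inspected and cleared under a spinlock with interrupts saved. Here is a simplified, self-contained sketch of that idiom; struct toy_sched, TOY_NUM_ENGINES and toy_release_engine() are invented stand-ins, not the real GVT types, and the lock is assumed to have been set up with spin_lock_init().

    #include <linux/spinlock.h>

    #define TOY_NUM_ENGINES 8

    struct toy_sched {
        spinlock_t mmio_context_lock;          /* protects engine_owner[] */
        void *engine_owner[TOY_NUM_ENGINES];   /* who owns each engine's MMIO state */
    };

    /* Drop the recorded MMIO owner for one engine, the way
     * shadow_context_status_change() clears engine_owner[ring_id]. */
    static void toy_release_engine(struct toy_sched *s, int ring_id)
    {
        unsigned long flags;

        spin_lock_irqsave(&s->mmio_context_lock, flags);
        if (s->engine_owner[ring_id]) {
            /* the real code switches MMIO state back to the host here
             * via intel_gvt_switch_mmio() before forgetting the owner */
            s->engine_owner[ring_id] = NULL;
        }
        spin_unlock_irqrestore(&s->mmio_context_lock, flags);
    }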
  debugfs.c: vgpu_mmio_diff_show() brackets its work with spin_lock_bh()/spin_unlock_bh() on gvt->scheduler.mmio_context_lock.
  Makefile: scheduler.o and sched_policy.o are built into the GVT object list alongside execlist.o, mmio_context.o, cmd_parser.o and debugfs.o.
  vgpu.c: intel_gvt_reset_vgpu_locked() takes a local pointer to &gvt->scheduler and checks scheduler->current_vgpu == NULL when the reset is triggered by the current vgpu.
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/ |
  sched_policy.c: same try_to_schedule_next_vgpu() and tbs_sched_*() matches as the linux-5.10 copy.
  scheduler.c: same shadow_context_status_change(), pick_next_workload() and workload_thread() matches as the linux-5.10 copy, at shifted line numbers.
  debugfs.c: same mmio_context_lock use in vgpu_mmio_diff_show() as the linux-5.10 copy.
  vgpu.c: same intel_gvt_reset_vgpu_locked() scheduler check as the linux-5.10 copy.
/kernel/linux/linux-6.6/net/mptcp/ |
  ctrl.c: the per-netns MPTCP struct has a char scheduler[MPTCP_SCHED_NAME_MAX] member, returned by mptcp_get_scheduler(), initialised to "default" in mptcp_pernet_set_defaults(), and exposed as the "scheduler" sysctl whose table entry data is pointed at &pernet->scheduler in mptcp_pernet_new_table(); a sketch of the pattern follows.
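The ctrl.c matches follow the common per-netns sysctl pattern: a writable name buffer lives in the pernet struct and its address is patched into a copied ctl_table before registration. The sketch below shows that pattern under stated assumptions: demo_pernet, demo_table, SCHED_NAME_MAX and the use of the stock proc_dostring handler are illustrative choices, not the exact MPTCP code (which points table[6].data at &pernet->scheduler).

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    #define SCHED_NAME_MAX 16                  /* stand-in for MPTCP_SCHED_NAME_MAX */

    struct demo_pernet {
        char scheduler[SCHED_NAME_MAX];
    };

    static struct ctl_table demo_table[] = {
        {
            .procname     = "scheduler",
            .maxlen       = SCHED_NAME_MAX,
            .mode         = 0644,
            .proc_handler = proc_dostring,     /* .data is patched in per netns below */
        },
        {}
    };

    static int demo_pernet_new_table(struct net *net, struct demo_pernet *pernet)
    {
        struct ctl_table *table = demo_table;

        strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));

        /* init_net uses the static table; other namespaces get their own copy */
        if (!net_eq(net, &init_net)) {
            table = kmemdup(demo_table, sizeof(demo_table), GFP_KERNEL);
            if (!table)
                return -ENOMEM;
        }
        table[0].data = &pernet->scheduler;    /* ctrl.c does this for table[6] */

        /* the returned header would normally be kept so it can be unregistered */
        return register_net_sysctl(net, "net/demo", table) ? 0 : -ENOMEM;
    }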
/kernel/linux/linux-5.10/sound/pci/mixart/ |
  mixart_core.h: u64 scheduler member repeated across seven message/stream structure definitions.
/kernel/linux/linux-6.6/sound/pci/mixart/ |
  mixart_core.h: same seven u64 scheduler members as the linux-5.10 copy, at shifted line numbers.
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
  i915_getparam.c: i915_getparam_ioctl() reports !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES) for one parameter and the raw i915->caps.scheduler bitmask for another; a userspace query sketch follows.
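Userspace reads the same caps.scheduler bitmask through DRM_IOCTL_I915_GETPARAM. A small userspace sketch; the render-node path is an assumption and the header may live under <libdrm/i915_drm.h> instead of <drm/i915_drm.h>, depending on the distribution.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>    /* may be <libdrm/i915_drm.h> on some systems */

    int main(void)
    {
        int fd = open("/dev/dri/renderD128", O_RDWR);  /* node path is an assumption */
        int caps = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_HAS_SCHEDULER,
            .value = &caps,
        };

        if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0) {
            perror("i915 getparam");
            return 1;
        }

        /* caps is the i915->caps.scheduler value filled in by i915_getparam_ioctl() */
        printf("scheduler caps: %#x (semaphores: %s)\n", caps,
               (caps & I915_SCHEDULER_CAP_SEMAPHORES) ? "yes" : "no");
        close(fd);
        return 0;
    }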
  intel_device_info.h: unsigned int scheduler member, the capability bitmask reported above.
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/ |
  i915_getparam.c: same scheduler capability reporting in i915_getparam_ioctl() as the linux-5.10 copy.
  intel_device_info.h: unsigned int scheduler member.
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
  intel_engine_user.c: set_scheduler_caps() computes i915->caps.scheduler = enabled & ~disabled across the engines and zeroes the mask when I915_SCHEDULER_CAP_ENABLED does not survive; see the aggregation sketch below.
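The enabled & ~disabled step means a capability is advertised only when at least one engine votes it in and no engine votes it out, and the whole mask collapses to zero unless the base ENABLED bit survives. A standalone sketch of that aggregation; struct toy_engine, its fields and the CAP_* names are invented placeholders for the per-engine checks.

    #include <stdint.h>
    #include <stddef.h>

    #define CAP_ENABLED    (1u << 0)   /* stand-ins for I915_SCHEDULER_CAP_* bits */
    #define CAP_SEMAPHORES (1u << 3)

    struct toy_engine {
        int schedulable;
        int has_semaphores;
    };

    static uint32_t toy_scheduler_caps(const struct toy_engine *engines, size_t n)
    {
        uint32_t enabled = 0, disabled = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            /* every engine votes each capability either in or out */
            if (engines[i].schedulable)
                enabled |= CAP_ENABLED;
            else
                disabled |= CAP_ENABLED;

            if (engines[i].has_semaphores)
                enabled |= CAP_SEMAPHORES;
            else
                disabled |= CAP_SEMAPHORES;
        }

        /* mirrors i915->caps.scheduler = enabled & ~disabled, zeroed without ENABLED */
        enabled &= ~disabled;
        if (!(enabled & CAP_ENABLED))
            enabled = 0;
        return enabled;
    }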
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/ |
  intel_engine_user.c: same set_scheduler_caps() masking as the linux-5.10 copy.
/kernel/linux/linux-5.10/drivers/gpu/drm/scheduler/ |
  gpu_scheduler_trace.h: defines TRACE_INCLUDE_PATH as ../../drivers/gpu/drm/scheduler so the tracepoint machinery can locate this header outside include/trace/events/; the usual closing boilerplate is sketched below.
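TRACE_INCLUDE_PATH is the standard bit of tracepoint boilerplate that lets a trace-event header live outside include/trace/events/. The closing lines of such a header conventionally look like this (a generic sketch of the convention rather than a verbatim quote of gpu_scheduler_trace.h):

    /* This part must sit outside the header's #ifndef/#define guard. */
    #undef TRACE_INCLUDE_PATH
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
    #define TRACE_INCLUDE_FILE gpu_scheduler_trace
    #include <trace/define_trace.h>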
/kernel/linux/linux-6.6/drivers/gpu/drm/scheduler/ |
  gpu_scheduler_trace.h: same TRACE_INCLUDE_PATH definition as the linux-5.10 copy.
/kernel/linux/linux-5.10/arch/x86/crypto/ |
  sha512-ssse3-asm.S: comments describing the SHA-512 message scheduler: WK_2(idx) supplies W[t] + K[t] from the message scheduler, vectored message-scheduler instructions are indented by one tab, and an extra iteration is needed because the scheduler leads the hashing by one iteration. A C rendition of the schedule follows.
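The "message scheduler" in these comments is the W[t] expansion from FIPS 180-4, which the assembly interleaves with the rounds (hence the note that the scheduler leads hashing by one iteration). A plain C rendition of that schedule for reference; sha512_message_schedule() is a name chosen here, not one from the kernel.

    #include <stdint.h>

    #define ROTR64(x, n)  (((x) >> (n)) | ((x) << (64 - (n))))

    /* FIPS 180-4 small sigma functions used by the SHA-512 message schedule */
    #define SIGMA0(x)  (ROTR64(x, 1) ^ ROTR64(x, 8) ^ ((x) >> 7))
    #define SIGMA1(x)  (ROTR64(x, 19) ^ ROTR64(x, 61) ^ ((x) >> 6))

    /* Expand the 16 message words of one block into W[0..79]; the rounds
     * then consume W[t] + K[t], which is what WK_2(idx) holds in the asm. */
    static void sha512_message_schedule(uint64_t W[80], const uint64_t M[16])
    {
        int t;

        for (t = 0; t < 16; t++)
            W[t] = M[t];
        for (t = 16; t < 80; t++)
            W[t] = SIGMA1(W[t - 2]) + W[t - 7] + SIGMA0(W[t - 15]) + W[t - 16];
    }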
/kernel/linux/linux-6.6/arch/x86/crypto/ |
  sha512-ssse3-asm.S: same message-scheduler comments as the linux-5.10 copy.
/kernel/linux/linux-5.10/include/net/ |
  ip_vs.h: struct ip_vs_service carries a struct ip_vs_scheduler __rcu *scheduler pointer (the bound scheduler object) and void *sched_data for scheduler application data; the scheduler object itself holds the scheduler name and an init_service hook; register_ip_vs_scheduler() and unregister_ip_vs_scheduler() are declared under "Registering/unregistering scheduler functions". A read-side sketch follows.
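Because the bound scheduler is an __rcu pointer, readers on the packet path dereference it without taking a lock. A minimal read-side sketch, loosely following what the IPVS connection-scheduling path does; pick_dest() is an illustrative name, not a kernel function.

    #include <net/ip_vs.h>

    /* Pick a destination for a new connection. The caller must hold
     * rcu_read_lock(), as the IPVS packet path does, so both the scheduler
     * and the returned destination stay valid while they are used. */
    static struct ip_vs_dest *pick_dest(struct ip_vs_service *svc,
                                        const struct sk_buff *skb,
                                        struct ip_vs_iphdr *iph)
    {
        struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);

        if (!sched || !sched->schedule)
            return NULL;

        return sched->schedule(svc, skb, iph);
    }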