/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/

H A D | scheduler.c |
   63  static void update_shadow_pdps(struct intel_vgpu_workload *workload)  in update_shadow_pdps()
   66  struct intel_context *ctx = workload->req->context;  in update_shadow_pdps()
   68  if (WARN_ON(!workload->shadow_mm))  in update_shadow_pdps()
   71  if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))  in update_shadow_pdps()
   76  (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);  in update_shadow_pdps()
   84  static void sr_oa_regs(struct intel_vgpu_workload *workload,  in sr_oa_regs()
   87  struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;  in sr_oa_regs()
  101  if (workload->engine->id != RCS0)  in sr_oa_regs()
  105  workload->oactxctrl = reg_state[ctx_oactxctrl + 1];  in sr_oa_regs()
  107  for (i = 0; i < ARRAY_SIZE(workload...  in sr_oa_regs()
  Definitions taking "workload" as an argument or local: populate_shadow_context() 127, shadow_context_status_change() 294 (local), shadow_context_descriptor_update() 345, copy_workload_to_ring_buffer() 361, set_context_ppgtt_from_shadow() 434, intel_gvt_workload_req_alloc() 459, intel_gvt_scan_and_shadow_workload() 486, prepare_shadow_batch_buffer() 522, update_wa_ctx_2_shadow_ctx() 592 (local), update_vreg_in_ctx() 650, release_shadow_batch_buffer() 656, intel_vgpu_shadow_mm_pin() 684, intel_vgpu_shadow_mm_unpin() 725, prepare_workload() 737, dispatch_workload() 799, pick_next_workload() 851 (local), update_guest_context() 936, complete_current_workload() 1069 (local), workload_thread() 1156 (local), intel_vgpu_destroy_workload() 1517, alloc_workload() 1545 (local), prepare_mm() 1580, intel_vgpu_create_workload() 1634 (local), intel_vgpu_queue_workload() 1780. [all...]
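The update_shadow_pdps() hits above show the scheduler refusing to touch the shadow context until the workload's shadow page tables both exist and are pinned. A minimal, userspace-runnable sketch of that guard-then-copy shape; the struct layout, field names, and NR_PDPS value are illustrative stand-ins, not the i915 types:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define NR_PDPS 4

struct shadow_mm {
	atomic_int pincount;                     /* > 0 while pinned */
	unsigned long long shadow_pdps[NR_PDPS];
};

struct workload {
	struct shadow_mm *shadow_mm;
	unsigned long long ctx_pdps[NR_PDPS];    /* stand-in for the context image */
};

/* Mirror of the WARN_ON guards: never copy page-directory pointers
 * from a shadow mm that is absent or not pinned. */
static void update_shadow_pdps(struct workload *w)
{
	if (!w->shadow_mm || atomic_load(&w->shadow_mm->pincount) <= 0) {
		fprintf(stderr, "shadow_mm missing or unpinned\n");
		return;
	}
	memcpy(w->ctx_pdps, w->shadow_mm->shadow_pdps, sizeof(w->ctx_pdps));
}

int main(void)
{
	struct shadow_mm mm = { .shadow_pdps = { 0x1000, 0x2000, 0x3000, 0x4000 } };
	struct workload w = { .shadow_mm = &mm };

	update_shadow_pdps(&w);            /* rejected: pincount is still 0 */
	atomic_fetch_add(&mm.pincount, 1); /* "pin" it */
	update_shadow_pdps(&w);            /* copies the four PDPs */
	printf("pdp0 = %#llx\n", w.ctx_pdps[0]);
	return 0;
}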
H A D | execlist.c |
  370  static int prepare_execlist_workload(struct intel_vgpu_workload *workload)  in prepare_execlist_workload()
  372  struct intel_vgpu *vgpu = workload->vgpu;  in prepare_execlist_workload()
  377  if (!workload->emulate_schedule_in)  in prepare_execlist_workload()
  380  ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);  in prepare_execlist_workload()
  381  ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);  in prepare_execlist_workload()
  383  ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],  in prepare_execlist_workload()
  392  static int complete_execlist_workload(struct intel_vgpu_workload *workload)  in complete_execlist_workload()
  394  struct intel_vgpu *vgpu = workload->vgpu;  in complete_execlist_workload()
  397  &s->execlist[workload->engine->id];  in complete_execlist_workload()
  399  struct list_head *next = workload_q_head(vgpu, workload...  in complete_execlist_workload()
  Definitions: submit_context() 436 (local). [all...]
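prepare_execlist_workload() pulls two context descriptors back out of the four ELSP dwords the guest wrote before replaying the schedule-in. A sketch of that unpacking, assuming each descriptor occupies two consecutive dwords, low word first; the driver actually overlays a bitfield struct (execlist_ctx_descriptor_format) on the pair, so treat this layout as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Four 32-bit ELSP dwords carry two 64-bit context descriptors. */
struct elsp_dwords {
	uint32_t data[4];
};

/* Assumed layout: descriptor i sits in data[2i] (low) and data[2i+1] (high). */
static uint64_t get_desc(const struct elsp_dwords *e, int index)
{
	return ((uint64_t)e->data[index * 2 + 1] << 32) | e->data[index * 2];
}

int main(void)
{
	struct elsp_dwords elsp = {
		{ 0x00000019, 0x11111111,   /* descriptor 0: low, high */
		  0x00000021, 0x22222222 }  /* descriptor 1: low, high */
	};

	printf("ctx[0]=%016llx ctx[1]=%016llx\n",
	       (unsigned long long)get_desc(&elsp, 0),
	       (unsigned long long)get_desc(&elsp, 1));
	return 0;
}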
H A D | cmd_parser.c |
  515  struct intel_vgpu_workload *workload;  (struct member)
  853  u32 base = s->workload->engine->mmio_base;  in is_cmd_update_pdps()
  861  struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;  in cmd_pdp_mmio_update_handler()
  877  &s->workload->lri_shadow_mm);  in cmd_pdp_mmio_update_handler()
 1004  * In order to let workload with inhibit context to generate  in cmd_reg_handler()
 1006  * hw via LRIs in the workload with inhibit context. But as  in cmd_reg_handler()
 1007  * indirect context is loaded prior to LRIs in workload, we don't  in cmd_reg_handler()
 1010  * update reg values in it into vregs, so LRIs in workload with  in cmd_reg_handler()
 1017  s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);  in cmd_reg_handler()
 1228  s->workload...  in cmd_handler_pipe_control()
  Definitions: scan_workload() 2834, scan_wa_ctx() 2879 (local), shadow_workload_ring_buffer() 2916, intel_gvt_scan_and_shadow_ringbuffer() 2975, shadow_indirect_ctx() 2998 (local), intel_gvt_scan_and_shadow_wa_ctx() 3070 (local), intel_gvt_scan_engine_context() 3155. [all...]
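is_cmd_update_pdps() classifies an LRI target offset relative to the engine's mmio_base so PDP writes can be redirected at the shadow_mm. A sketch of such an offset test, assuming the GEN8 convention that the four ring PDP register pairs start at mmio_base + 0x270; the helper name and the range check are illustrative, not the driver's exact predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GEN8+ rings expose PDP0..PDP3 as LDW/UDW register pairs starting at
 * mmio_base + 0x270; treat that base offset as an assumption here. */
static bool is_pdp_reg(uint32_t mmio_base, uint32_t offset)
{
	uint32_t first = mmio_base + 0x270;              /* PDP0 lower dword */
	uint32_t last  = mmio_base + 0x270 + 4 * 8 - 4;  /* PDP3 upper dword */

	return offset >= first && offset <= last && !(offset & 3);
}

int main(void)
{
	uint32_t rcs_base = 0x2000;  /* render engine mmio_base */

	printf("0x2270 -> %d, 0x228c -> %d, 0x2300 -> %d\n",
	       is_pdp_reg(rcs_base, 0x2270),
	       is_pdp_reg(rcs_base, 0x228c),
	       is_pdp_reg(rcs_base, 0x2300));
	return 0;
}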
H A D | cmd_parser.h |
   50  int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
   56  int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
H A D | trace.h |
  231  void *workload, const char *cmd_name),
  234  buf_addr_type, workload, cmd_name),
  243  __field(void*, workload)
  255  __entry->workload = workload;
  261  TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
  271  __entry->workload)
H A D | scheduler.h |
   89  /* if this workload has been dispatched to i915? */
   91  bool shadow; /* if workload has done shadow of guest request */
  139  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
  166  void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/

H A D | scheduler.c |
   59  static void update_shadow_pdps(struct intel_vgpu_workload *workload)  in update_shadow_pdps()
   62  struct intel_context *ctx = workload->req->context;  in update_shadow_pdps()
   64  if (WARN_ON(!workload->shadow_mm))  in update_shadow_pdps()
   67  if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))  in update_shadow_pdps()
   72  (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);  in update_shadow_pdps()
   80  static void sr_oa_regs(struct intel_vgpu_workload *workload,  in sr_oa_regs()
   83  struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;  in sr_oa_regs()
   97  if (workload->engine->id != RCS0)  in sr_oa_regs()
  101  workload->oactxctrl = reg_state[ctx_oactxctrl + 1];  in sr_oa_regs()
  103  for (i = 0; i < ARRAY_SIZE(workload...  in sr_oa_regs()
  Definitions taking "workload" as an argument or local: populate_shadow_context() 123, shadow_context_status_change() 276 (local), shadow_context_descriptor_update() 327, copy_workload_to_ring_buffer() 343, set_context_ppgtt_from_shadow() 414, intel_gvt_workload_req_alloc() 439, intel_gvt_scan_and_shadow_workload() 466, prepare_shadow_batch_buffer() 502, update_wa_ctx_2_shadow_ctx() 561 (local), update_vreg_in_ctx() 604, release_shadow_batch_buffer() 610, intel_vgpu_shadow_mm_pin() 636, intel_vgpu_shadow_mm_unpin() 677, prepare_workload() 689, dispatch_workload() 751, pick_next_workload() 803 (local), update_guest_context() 888, complete_current_workload() 1022 (local), workload_thread() 1109 (local), intel_vgpu_destroy_workload() 1473, alloc_workload() 1501 (local), prepare_mm() 1536, intel_vgpu_create_workload() 1590 (local), intel_vgpu_queue_workload() 1736. [all...]
H A D | execlist.c |
  370  static int prepare_execlist_workload(struct intel_vgpu_workload *workload)  in prepare_execlist_workload()
  372  struct intel_vgpu *vgpu = workload->vgpu;  in prepare_execlist_workload()
  377  if (!workload->emulate_schedule_in)  in prepare_execlist_workload()
  380  ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);  in prepare_execlist_workload()
  381  ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);  in prepare_execlist_workload()
  383  ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],  in prepare_execlist_workload()
  392  static int complete_execlist_workload(struct intel_vgpu_workload *workload)  in complete_execlist_workload()
  394  struct intel_vgpu *vgpu = workload->vgpu;  in complete_execlist_workload()
  397  &s->execlist[workload->engine->id];  in complete_execlist_workload()
  399  struct list_head *next = workload_q_head(vgpu, workload...  in complete_execlist_workload()
  Definitions: submit_context() 436 (local). [all...]
H A D | cmd_parser.c |
  501  struct intel_vgpu_workload *workload;  (struct member)
  888  u32 base = s->workload->engine->mmio_base;  in is_cmd_update_pdps()
  896  struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;  in cmd_pdp_mmio_update_handler()
  912  &s->workload->lri_shadow_mm);  in cmd_pdp_mmio_update_handler()
  969  * In order to let workload with inhibit context to generate  in cmd_reg_handler()
  971  * hw via LRIs in the workload with inhibit context. But as  in cmd_reg_handler()
  972  * indirect context is loaded prior to LRIs in workload, we don't  in cmd_reg_handler()
  975  * update reg values in it into vregs, so LRIs in workload with  in cmd_reg_handler()
  982  s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);  in cmd_reg_handler()
 1193  s->workload...  in cmd_handler_pipe_control()
  Definitions: scan_workload() 2796, scan_wa_ctx() 2842 (local), shadow_workload_ring_buffer() 2880, intel_gvt_scan_and_shadow_ringbuffer() 2939, shadow_indirect_ctx() 2962 (local), intel_gvt_scan_and_shadow_wa_ctx() 3034 (local). [all...]
H A D | trace.h |
  231  void *workload, const char *cmd_name),
  234  buf_addr_type, workload, cmd_name),
  243  __field(void*, workload)
  255  __entry->workload = workload;
  261  TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
  271  __entry->workload)
H A D | scheduler.h |
   84  /* if this workload has been dispatched to i915? */
   86  bool shadow; /* if workload has done shadow of guest request */
  134  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
  161  void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
H A D | cmd_parser.h |
   49  int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
/kernel/linux/linux-5.10/tools/perf/tests/

H A D | perf-record.c |
   94  * Prepare the workload in argv[] to run, it'll fork it, and then wait  in test__PERF_RECORD()
  101  pr_debug("Couldn't run the workload!\n");  in test__PERF_RECORD()
  114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);  in test__PERF_RECORD()
  126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {  in test__PERF_RECORD()
  157  * count just on workload.pid, which will start...  in test__PERF_RECORD()
  212  if ((pid_t)sample.pid != evlist->workload.pid) {  in test__PERF_RECORD()
  214  name, evlist->workload.pid, sample.pid);  in test__PERF_RECORD()
  218  if ((pid_t)sample.tid != evlist->workload.pid) {  in test__PERF_RECORD()
  220  name, evlist->workload.pid, sample.tid);  in test__PERF_RECORD()
  229  (pid_t)event->comm.pid != evlist->workload...  in test__PERF_RECORD()
  [all...]
H A D | event-times.c |
   37  pr_debug("Couldn't run the workload!\n");  in attach__enable_on_exec()
   55  waitpid(evlist->workload.pid, NULL, 0);  in detach__enable_on_exec()
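attach__enable_on_exec() opens events disabled and lets the kernel arm them when the forked workload calls exec(); detach__enable_on_exec() then reaps it with waitpid(). A sketch of that attribute setup using the raw perf_event_open syscall; unlike the real code, nothing corks the child here, so the attach can race the exec:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	pid_t pid;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;        /* counter starts off... */
	attr.enable_on_exec = 1;  /* ...and arms itself at exec() */

	pid = fork();
	if (pid < 0)
		return 1;
	if (!pid) {
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	/* Racy without the cork pipe perf really uses: the child may
	 * exec before we attach.  Good enough to show the attribute. */
	fd = (int)syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");

	waitpid(pid, NULL, 0);    /* mirrors detach__enable_on_exec() */
	if (fd >= 0) {
		long long count = 0;
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %lld\n", count);
		close(fd);
	}
	return 0;
}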
/kernel/linux/linux-6.6/tools/perf/tests/

H A D | perf-record.c |
   94  * Prepare the workload in argv[] to run, it'll fork it, and then wait  in test__PERF_RECORD()
  101  pr_debug("Couldn't run the workload!\n");  in test__PERF_RECORD()
  114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);  in test__PERF_RECORD()
  126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {  in test__PERF_RECORD()
  157  * count just on workload.pid, which will start...  in test__PERF_RECORD()
  212  if ((pid_t)sample.pid != evlist->workload.pid) {  in test__PERF_RECORD()
  214  name, evlist->workload.pid, sample.pid);  in test__PERF_RECORD()
  218  if ((pid_t)sample.tid != evlist->workload.pid) {  in test__PERF_RECORD()
  220  name, evlist->workload.pid, sample.tid);  in test__PERF_RECORD()
  229  (pid_t)event->comm.pid != evlist->workload...  in test__PERF_RECORD()
  [all...]
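The test pins the forked workload to the first CPU the child is allowed to run on, so every sample should carry that CPU. A sketch of what sched__get_first_possible_cpu() plus the sched_setaffinity() call accomplish, applied to the current process instead of evlist->workload.pid:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = getpid();   /* stand-in for evlist->workload.pid */
	cpu_set_t mask;
	int cpu = -1, i;

	/* "first possible cpu": lowest bit set in the allowed mask */
	CPU_ZERO(&mask);
	if (sched_getaffinity(pid, sizeof(mask), &mask))
		return 1;
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &mask)) {
			cpu = i;
			break;
		}
	}
	if (cpu < 0)
		return 1;

	/* pin the workload there so all its samples land on one CPU */
	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(pid, sizeof(mask), &mask)) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pinned pid %d to cpu %d\n", (int)pid, cpu);
	return 0;
}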
H A D | builtin-test.c |
  511  pr_info("No workload found: %s\n", work);  in run_workload()
  522  const char *workload = NULL;  in cmd_test() (local)
  529  OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),  in cmd_test()
  547  if (workload)  in cmd_test()
  548  return run_workload(workload, argc, argv);  in cmd_test()
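cmd_test() resolves -w/--workload by name and hands the remaining argv to it, printing "No workload found" otherwise. A sketch of the name-to-function dispatch that option implies; the table entries and signatures here are stand-ins, not perf's actual registration machinery:

#include <stdio.h>
#include <string.h>

struct workload {
	const char *name;
	int (*func)(int argc, const char **argv);
};

/* Stand-in workloads; names echo perf's built-in test workloads. */
static int noploop(int argc, const char **argv) { (void)argc; (void)argv; return 0; }
static int thloop(int argc, const char **argv)  { (void)argc; (void)argv; return 0; }

static const struct workload workloads[] = {
	{ "noploop", noploop },
	{ "thloop",  thloop  },
};

static int run_workload(const char *work, int argc, const char **argv)
{
	size_t i;

	for (i = 0; i < sizeof(workloads) / sizeof(workloads[0]); i++)
		if (!strcmp(workloads[i].name, work))
			return workloads[i].func(argc, argv);
	fprintf(stderr, "No workload found: %s\n", work);
	return 1;
}

int main(int argc, const char **argv)
{
	return argc > 1 ? run_workload(argv[1], argc - 1, argv + 1) : 0;
}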
/kernel/linux/linux-5.10/tools/perf/bench/

H A D | find-bit-bench.c |
   34  static noinline void workload(int val)  (function definition)
   80  workload(bit);  in do_for_each_set_bit()
   93  workload(bit);  in do_for_each_set_bit()
/kernel/linux/linux-6.6/tools/perf/bench/

H A D | find-bit-bench.c |
   34  static noinline void workload(int val)  (function definition)
   82  workload(bit);  in do_for_each_set_bit()
   97  workload(bit);  in do_for_each_set_bit()
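The benchmark calls a deliberately noinline workload() on every set bit so the compiler cannot fold the per-bit work into the loop and the iteration cost being measured stays realistic. A portable sketch of the same idea; __builtin_ctzll() plus clearing the lowest set bit stands in for the kernel's for_each_set_bit():

#include <stdint.h>
#include <stdio.h>

static volatile int accumulator;

/* noinline keeps the compiler from folding the per-bit "work" into
 * the iteration itself, which would make the loop cost meaningless. */
__attribute__((noinline)) static void workload(int bit)
{
	accumulator += bit;
}

int main(void)
{
	uint64_t bitmap = 0xF0F0F0F0F0F0F0F0ull;
	uint64_t m;

	/* portable stand-in for for_each_set_bit(): __builtin_ctzll()
	 * finds the lowest set bit, m &= m - 1 clears it */
	for (m = bitmap; m; m &= m - 1)
		workload(__builtin_ctzll(m));

	printf("sum of set-bit indices: %d\n", accumulator);
	return 0;
}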
/kernel/linux/linux-6.6/tools/perf/tests/shell/lib/

H A D | perf_metric_validation.py |
   10  def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
   19  self.workloads = [x for x in workload.split(",") if x]
   21  self.allresults = dict() # metric results of all workload
   28  self.results = dict() # metric results of current workload
  360  def _run_perf(self, metric, workload: str):
  363  wl = workload.split()
  371  def collect_perf(self, workload: str):
  373  Collect metric data with "perf stat -M" on given workload with -a and -j.
  377  print(f"Long workload: {workload}")
  [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/powerplay/hwmgr/

H A D | pp_psm.c |
  263  long workload;  in psm_adjust_power_state_dynamic() (local)
  288  workload = hwmgr->workload_setting[index];  in psm_adjust_power_state_dynamic()
  290  if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)  in psm_adjust_power_state_dynamic()
  291  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);  in psm_adjust_power_state_dynamic()
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/pm/powerplay/hwmgr/

H A D | pp_psm.c |
  272  long workload;  in psm_adjust_power_state_dynamic() (local)
  297  workload = hwmgr->workload_setting[index];  in psm_adjust_power_state_dynamic()
  299  if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)  in psm_adjust_power_state_dynamic()
  300  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);  in psm_adjust_power_state_dynamic()
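Both pp_psm.c hunks gate the hardware call the same way: fetch the highest-priority entry from workload_setting[] and only invoke set_power_profile_mode() when it differs from the profile already in force (amdgpu_smu.c below follows the same pattern). A compilable sketch of that gate; the enum, struct, and stub are stand-ins for the hwmgr types:

#include <stdio.h>

enum profile { PROFILE_3D, PROFILE_VIDEO, PROFILE_COMPUTE };

struct hwmgr {
	long power_profile_mode;
	long workload_setting[3];
	void (*set_power_profile_mode)(struct hwmgr *hwmgr, long *workload);
};

static void set_mode(struct hwmgr *hwmgr, long *workload)
{
	hwmgr->power_profile_mode = *workload;
	printf("power profile -> %ld\n", *workload);
}

int main(void)
{
	struct hwmgr hwmgr = {
		.power_profile_mode     = PROFILE_3D,
		.workload_setting       = { PROFILE_3D, PROFILE_VIDEO, PROFILE_COMPUTE },
		.set_power_profile_mode = set_mode,
	};
	int index = 2;  /* highest-priority pending setting */
	long workload = hwmgr.workload_setting[index];

	/* the gate: skip the hw call when nothing would change */
	if (hwmgr.power_profile_mode != workload && hwmgr.set_power_profile_mode)
		hwmgr.set_power_profile_mode(&hwmgr, &workload);
	return 0;
}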
/kernel/linux/linux-6.6/tools/perf/tests/shell/

H A D | test_intel_pt.sh |
   23  workload="${temp_dir}/workload"
   63  cat << _end_of_file_ | /usr/bin/cc -o "${workload}" -xc - -pthread && have_workload=true
  148  echo "No workload, so skipping"
  254  $workload &
  256  $workload &
  297  # Create a workload that uses self-modifying code and generates its own jitdump file
  406  echo "SKIP: No jitdump workload"
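Line 63 builds the test's workload by piping a C program straight into cc with -xc - -pthread. The actual program in the script differs; this is a plausible minimal workload of that shape, a two-thread spin loop that gives the tracer branches to record:

/* feed to:  cc -o workload -xc - -pthread  (as the script does) */
#include <pthread.h>
#include <stdlib.h>

/* Spin for a fixed count so the tracer sees plenty of branches;
 * volatile keeps the loop from being optimised away. */
static void *spin(void *arg)
{
	volatile unsigned long n = (unsigned long)arg;

	while (n--)
		;
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, spin, (void *)10000000UL))
		exit(1);
	spin((void *)10000000UL);
	pthread_join(t, NULL);
	return 0;
}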
/kernel/linux/linux-5.10/tools/perf/util/

H A D | evlist.c |
   64  evlist->workload.pid = -1;  in evlist__init()
 1343  evlist->workload.pid = fork();  in perf_evlist__prepare_workload()
 1344  if (evlist->workload.pid < 0) {  in perf_evlist__prepare_workload()
 1349  if (!evlist->workload.pid) {  in perf_evlist__prepare_workload()
 1372  * writing exactly one byte, in workload.cork_fd, usually via  in perf_evlist__prepare_workload()
 1375  * For cancelling the workload without actually running it,  in perf_evlist__prepare_workload()
 1376  * the parent will just close workload.cork_fd, without writing  in perf_evlist__prepare_workload()
 1413  perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);  in perf_evlist__prepare_workload()
 1427  evlist->workload.cork_fd = go_pipe[1];  in perf_evlist__prepare_workload()
 1442  if (evlist->workload...  in perf_evlist__start_workload()
  [all...]
/kernel/linux/linux-6.6/tools/perf/util/

H A D | evlist.c |
   76  evlist->workload.pid = -1;  in evlist__init()
 1425  evlist->workload.pid = fork();  in evlist__prepare_workload()
 1426  if (evlist->workload.pid < 0) {  in evlist__prepare_workload()
 1431  if (!evlist->workload.pid) {  in evlist__prepare_workload()
 1461  * writing exactly one byte, in workload.cork_fd, usually via  in evlist__prepare_workload()
 1464  * For cancelling the workload without actually running it,  in evlist__prepare_workload()
 1465  * the parent will just close workload.cork_fd, without writing  in evlist__prepare_workload()
 1502  perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);  in evlist__prepare_workload()
 1516  evlist->workload.cork_fd = go_pipe[1];  in evlist__prepare_workload()
 1531  if (evlist->workload...  in evlist__start_workload()
  [all...]
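The comments at 1461-1465 (and 1372-1376 in v5.10) describe the cork protocol: the forked child blocks reading one byte from a pipe, the parent keeps the write end as workload.cork_fd, and starting the workload means writing exactly one byte, while closing without writing cancels it. A self-contained sketch of that handshake; the real code additionally uses a CLOEXEC pipe to report exec() failure back to the parent:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int go_pipe[2];
	pid_t pid;

	if (pipe(go_pipe))
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;
	if (!pid) {                    /* child: the corked workload */
		char bf;

		close(go_pipe[1]);
		if (read(go_pipe[0], &bf, 1) != 1)
			_exit(1);      /* parent closed without writing: cancelled */
		execlp("echo", "echo", "workload ran", (char *)NULL);
		_exit(127);            /* exec failed */
	}

	close(go_pipe[0]);
	/* parent keeps the write end; this is workload.cork_fd */
	int cork_fd = go_pipe[1];

	/* "start workload" == write exactly one byte, then drop the cork */
	if (write(cork_fd, "", 1) != 1)
		perror("write cork_fd");
	close(cork_fd);
	wait(NULL);
	return 0;
}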
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/swsmu/

H A D | amdgpu_smu.c |
 1518  long workload;  in smu_adjust_power_state_dynamic() (local)
 1557  workload = smu->workload_setting[index];  in smu_adjust_power_state_dynamic()
 1559  if (smu->power_profile_mode != workload)  in smu_adjust_power_state_dynamic()
 1560  smu_set_power_profile_mode(smu, &workload, 0, false);  in smu_adjust_power_state_dynamic()
 1609  long workload;  in smu_switch_power_profile() (local)
 1624  workload = smu->workload_setting[index];  in smu_switch_power_profile()
 1629  workload = smu->workload_setting[index];  in smu_switch_power_profile()
 1633  smu_set_power_profile_mode(smu, &workload, 0, false);  in smu_switch_power_profile()