/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/ |
H A D | intel_timeline.c |
  117 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_init_timelines() local
  119 spin_lock_init(&timelines->lock); in intel_gt_init_timelines()
  120 INIT_LIST_HEAD(&timelines->active_list); in intel_gt_init_timelines()
  177 /* Borrow a nearby lock; we only create these timelines during init */ in intel_timeline_create_from_engine()
  179 list_add_tail(&tl->engine_link, &engine->status_page.timelines); in intel_timeline_create_from_engine()
  236 struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_enter() local
  261 spin_lock(&timelines->lock); in intel_timeline_enter()
  270 list_add_tail(&tl->link, &timelines in intel_timeline_enter()
  277 struct intel_gt_timelines *timelines = &tl->gt->timelines; intel_timeline_exit() local
  405 struct intel_gt_timelines *timelines = &gt->timelines; intel_gt_fini_timelines() local
  417 struct intel_gt_timelines *timelines = &gt->timelines; intel_gt_show_timelines() local
  [all...]
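The hits above outline the lifecycle of the per-GT timeline bookkeeping: intel_gt_init_timelines() initialises a spinlock and an active list, and intel_timeline_enter()/intel_timeline_exit() add and remove a timeline from that list under the lock as requests come and go. A minimal kernel-style sketch of that pattern, using illustrative demo_* names rather than the real i915 declarations:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative stand-ins for intel_gt_timelines / intel_timeline. */
struct demo_gt_timelines {
	spinlock_t lock;		/* protects active_list */
	struct list_head active_list;	/* timelines with requests in flight */
};

struct demo_timeline {
	struct list_head link;		/* entry on active_list */
};

static void demo_init_timelines(struct demo_gt_timelines *timelines)
{
	spin_lock_init(&timelines->lock);
	INIT_LIST_HEAD(&timelines->active_list);
}

/* Called when a timeline gains its first active request. */
static void demo_timeline_enter(struct demo_gt_timelines *timelines,
				struct demo_timeline *tl)
{
	spin_lock(&timelines->lock);
	list_add_tail(&tl->link, &timelines->active_list);
	spin_unlock(&timelines->lock);
}

/* Called when the last request on the timeline is retired. */
static void demo_timeline_exit(struct demo_gt_timelines *timelines,
			       struct demo_timeline *tl)
{
	spin_lock(&timelines->lock);
	list_del(&tl->link);
	spin_unlock(&timelines->lock);
}

The spinlock only guards list membership; in the real driver the heavier per-timeline work (request construction and retirement) happens under each timeline's own mutex.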
H A D | intel_gt_requests.c |
  70 * Our goal here is to retire _idle_ timelines as soon as in engine_retire()
  136 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_retire_requests_timeout() local
  142 spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
  143 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { in intel_gt_retire_requests_timeout()
  152 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
  178 out_active: spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
  191 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
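intel_gt_retire_requests_timeout() walks the active list under timelines->lock and unlinks timelines that have gone idle; list_for_each_entry_safe() is what makes unlinking during the walk legal. The real function also drops the lock while retiring each timeline's requests, which this sketch omits. Reusing the illustrative types from the sketch above, with a hypothetical idleness predicate:

static bool demo_timeline_is_idle(struct demo_timeline *tl);	/* hypothetical */

/* Drop idle timelines from the active list; a simplified sketch. */
static void demo_retire_idle_timelines(struct demo_gt_timelines *timelines)
{
	struct demo_timeline *tl, *tn;

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!demo_timeline_is_idle(tl))
			continue;

		/* Safe during iteration: tn already points at the next entry. */
		list_del(&tl->link);
	}
	spin_unlock(&timelines->lock);
}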
|
H A D | intel_engine_pm.c |
  113 struct intel_gt_timelines *timelines = &engine->gt->timelines; in __queue_and_release_pm() local
  135 spin_lock(&timelines->lock); in __queue_and_release_pm()
  139 list_add_tail(&tl->link, &timelines->active_list); in __queue_and_release_pm()
  147 spin_unlock(&timelines->lock); in __queue_and_release_pm()
  190 * all timelines idle. in switch_to_kernel_context()
  201 * list of active timelines looking for completions. Meanwhile as soon in switch_to_kernel_context()
  203 * Ergo, if we put ourselves on the timelines.active_list in switch_to_kernel_context()
|
H A D | selftest_timeline.c |
  167 * Create a bunch of timelines and check that their HWSP do not overlap. in mock_hwsp_freelist()
  528 struct intel_timeline **timelines; in live_hwsp_engine() local
  535 * Create a bunch of timelines and check we can write in live_hwsp_engine()
  539 timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, in live_hwsp_engine()
  540 sizeof(*timelines), in live_hwsp_engine()
  542 if (!timelines) in live_hwsp_engine()
  569 timelines[count++] = tl; in live_hwsp_engine()
  582 struct intel_timeline *tl = timelines[n]; in live_hwsp_engine()
  593 kvfree(timelines); in live_hwsp_engine()
  602 struct intel_timeline **timelines; in live_hwsp_alternate() local
  [all...]
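live_hwsp_engine() keeps every timeline it creates in one flat pointer array so it can write through each and verify them all before cleanup; the array is sized NUM_TIMELINES * I915_NUM_ENGINES, allocated with kvmalloc_array() and released with kvfree(). A hedged sketch of that allocate/populate/release shape, reusing the illustrative struct demo_timeline from the first sketch, with illustrative counts and hypothetical create/put helpers:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#define DEMO_NUM_TIMELINES 8	/* illustrative */
#define DEMO_NUM_ENGINES   4	/* illustrative */

struct demo_timeline *demo_timeline_create(void);	/* hypothetical */
void demo_timeline_put(struct demo_timeline *tl);	/* hypothetical */

static int demo_collect_timelines(void)
{
	struct demo_timeline **timelines;
	unsigned long count = 0;
	int err = 0;

	timelines = kvmalloc_array(DEMO_NUM_TIMELINES * DEMO_NUM_ENGINES,
				   sizeof(*timelines), GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	while (count < DEMO_NUM_TIMELINES * DEMO_NUM_ENGINES) {
		struct demo_timeline *tl = demo_timeline_create();

		if (IS_ERR(tl)) {
			err = PTR_ERR(tl);
			break;
		}
		timelines[count++] = tl;
	}

	/* ... exercise each collected timeline here ... */

	while (count--)
		demo_timeline_put(timelines[count]);

	kvfree(timelines);
	return err;
}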
H A D | intel_reset.c |
  1040 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged() local
  1063 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
  1064 list_for_each_entry(tl, &timelines->active_list, link) { in __intel_gt_unset_wedged()
  1071 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
  1084 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
  1085 tl = list_entry(&timelines->active_list, typeof(*tl), link); in __intel_gt_unset_wedged()
  1087 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
|
H A D | intel_gt_types.h | 131 } timelines; member
|
H A D | intel_engine_types.h |
  64 struct list_head timelines; member
  554 * Keep track of completed timelines on this engine for early
|
H A D | intel_ring_submission.c | 279 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
|
H A D | intel_engine_cs.c |
  1089 INIT_LIST_HEAD(&engine->status_page.timelines); in init_status_page()
  1395 * Give our perma-pinned kernel timelines a separate lockdep class, in intel_engine_create_pinned_context()
  1396 * so that we can use them from within the normal user timelines in intel_engine_create_pinned_context()
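The comment at lines 1395-1396 is about lock nesting: the engine's perma-pinned kernel-context timeline may be locked while a user timeline's lock is already held, so the kernel timelines get their own lockdep class to keep lockdep from flagging that as recursive locking. The usual mechanism is a dedicated struct lock_class_key; a hedged sketch of that mechanism with illustrative names, not the actual i915 call site:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* One class key shared by every perma-pinned "kernel" timeline mutex. */
static struct lock_class_key demo_kernel_timeline_class;

static void demo_mark_kernel_timeline(struct mutex *tl_mutex)
{
	/*
	 * Move this mutex into its own lockdep class so taking it while a
	 * regular user-timeline mutex is held is not reported as recursion
	 * within a single lock class.
	 */
	lockdep_set_class(tl_mutex, &demo_kernel_timeline_class);
}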
|
H A D | intel_execlists_submission.c |
  448 * requests so that inter-timeline dependencies (i.e other timelines) in reset_active()
  2853 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
H A D | intel_timeline.c |
  51 struct intel_gt_timelines *gt = &timeline->gt->timelines; in hwsp_alloc()
  278 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_init_timelines() local
  280 spin_lock_init(&timelines->lock); in intel_gt_init_timelines()
  281 INIT_LIST_HEAD(&timelines->active_list); in intel_gt_init_timelines()
  283 spin_lock_init(&timelines->hwsp_lock); in intel_gt_init_timelines()
  284 INIT_LIST_HEAD(&timelines->hwsp_free_list); in intel_gt_init_timelines()
  371 struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_enter() local
  396 spin_lock(&timelines in intel_timeline_enter()
  412 struct intel_gt_timelines *timelines = &tl->gt->timelines; intel_timeline_exit() local
  620 struct intel_gt_timelines *timelines = &gt->timelines; intel_gt_fini_timelines() local
  [all...]
H A D | intel_gt_requests.c |
  70 * Our goal here is to retire _idle_ timelines as soon as in engine_retire()
  135 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_retire_requests_timeout() local
  146 spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
  147 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { in intel_gt_retire_requests_timeout()
  156 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
  182 out_active: spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
  195 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
|
H A D | intel_engine_pm.c |
  106 struct intel_gt_timelines *timelines = &engine->gt->timelines; in __queue_and_release_pm() local
  119 spin_lock(&timelines->lock); in __queue_and_release_pm()
  123 list_add_tail(&tl->link, &timelines->active_list); in __queue_and_release_pm()
  131 spin_unlock(&timelines->lock); in __queue_and_release_pm()
  162 * all timelines idle. in switch_to_kernel_context()
  173 * list of active timelines looking for completions. Meanwhile as soon in switch_to_kernel_context()
  175 * Ergo, if we put ourselves on the timelines.active_list in switch_to_kernel_context()
|
H A D | selftest_timeline.c |
  135 * Create a bunch of timelines and check that their HWSP do not overlap. in mock_hwsp_freelist()
  508 struct intel_timeline **timelines; in live_hwsp_engine() local
  515 * Create a bunch of timelines and check we can write in live_hwsp_engine()
  519 timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, in live_hwsp_engine()
  520 sizeof(*timelines), in live_hwsp_engine()
  522 if (!timelines) in live_hwsp_engine()
  549 timelines[count++] = tl; in live_hwsp_engine()
  562 struct intel_timeline *tl = timelines[n]; in live_hwsp_engine()
  573 kvfree(timelines); in live_hwsp_engine()
  582 struct intel_timeline **timelines; in live_hwsp_alternate() local
  [all...]
H A D | intel_reset.c |
  898 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged() local
  921 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
  922 list_for_each_entry(tl, &timelines->active_list, link) { in __intel_gt_unset_wedged()
  929 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
  942 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
  943 tl = list_entry(&timelines->active_list, typeof(*tl), link); in __intel_gt_unset_wedged()
  945 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
|
H A D | intel_gt_types.h |
  45 /* Pack multiple timelines' seqnos into the same page */
  48 } timelines; member
|
/kernel/linux/linux-5.10/tools/testing/selftests/sync/ |
H A D | sync_stress_merge.c |
  41 int timelines[timeline_count]; in test_merge_stress_random_merge() local
  49 timelines[i] = sw_sync_timeline_create(); in test_merge_stress_random_merge()
  51 fence = sw_sync_fence_create(timelines[0], "fence", 0); in test_merge_stress_random_merge()
  59 * Randomly create sync_points out of a fixed set of timelines, in test_merge_stress_random_merge()
  65 timeline = timelines[timeline_offset]; in test_merge_stress_random_merge()
  101 sw_sync_timeline_inc(timelines[i], fence_map[i]); in test_merge_stress_random_merge()
  112 sw_sync_timeline_destroy(timelines[i]); in test_merge_stress_random_merge()
|
/kernel/linux/linux-6.6/tools/testing/selftests/sync/ |
H A D | sync_stress_merge.c |
  41 int timelines[timeline_count]; in test_merge_stress_random_merge() local
  49 timelines[i] = sw_sync_timeline_create(); in test_merge_stress_random_merge()
  51 fence = sw_sync_fence_create(timelines[0], "fence", 0); in test_merge_stress_random_merge()
  59 * Randomly create sync_points out of a fixed set of timelines, in test_merge_stress_random_merge()
  65 timeline = timelines[timeline_offset]; in test_merge_stress_random_merge()
  101 sw_sync_timeline_inc(timelines[i], fence_map[i]); in test_merge_stress_random_merge()
  112 sw_sync_timeline_destroy(timelines[i]); in test_merge_stress_random_merge()
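Both trees carry the identical sw_sync stress test: it creates a pool of software timelines, cuts fences against random sync points, merges them, and finally advances the timelines so every merged fence signals. A condensed userspace sketch of that flow: sw_sync_timeline_create(), sw_sync_fence_create(), sw_sync_timeline_inc() and sw_sync_timeline_destroy() are the helpers visible in the hits above, while sync_merge() and sync_wait() are assumed to come from the companion sync.h helper in the same selftests directory; the counts here are illustrative.

#include <stdio.h>
#include <unistd.h>

#include "sw_sync.h"	/* sw_sync_* helpers from tools/testing/selftests/sync */
#include "sync.h"	/* sync_merge(), sync_wait() -- assumed companion helpers */

#define TIMELINE_COUNT 4	/* illustrative; the test derives its own count */

int main(void)
{
	int timelines[TIMELINE_COUNT];
	int a, b, merged, i;

	for (i = 0; i < TIMELINE_COUNT; i++)
		timelines[i] = sw_sync_timeline_create();

	/* Sync points that signal once their timeline reaches value 1. */
	a = sw_sync_fence_create(timelines[0], "a", 1);
	b = sw_sync_fence_create(timelines[1], "b", 1);
	merged = sync_merge("merged", a, b);

	/* Advance both timelines; the merged fence should now signal. */
	sw_sync_timeline_inc(timelines[0], 1);
	sw_sync_timeline_inc(timelines[1], 1);
	printf("merged fence signalled: %s\n",
	       sync_wait(merged, 0) > 0 ? "yes" : "no");

	close(a);
	close(b);
	close(merged);
	for (i = 0; i < TIMELINE_COUNT; i++)
		sw_sync_timeline_destroy(timelines[i]);
	return 0;
}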
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/ |
H A D | i915_mock_selftests.h | 26 selftest(timelines, intel_timeline_mock_selftests)
|
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/ |
H A D | i915_mock_selftests.h | 26 selftest(timelines, intel_timeline_mock_selftests)
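i915_mock_selftests.h is a pure list: each selftest(name, function) line is expanded several times under different definitions of the selftest() macro, for example once to produce an index enum and once to produce the table the selftest runner iterates (the i915 runner does this by re-including the header). A self-contained sketch of that X-macro technique with generic names, not the actual i915 runner:

#include <stdio.h>

static int demo_timeline_selftests(void) { return 0; }	/* stand-in test body */
static int demo_request_selftests(void)  { return 0; }	/* stand-in test body */

/* One entry per test; i915 keeps these lines in i915_mock_selftests.h instead. */
#define DEMO_SELFTEST_LIST(X)				\
	X(timelines, demo_timeline_selftests)		\
	X(requests,  demo_request_selftests)

struct demo_selftest {
	const char *name;
	int (*run)(void);
};

/* Expansion 1: an enum index per test. */
#define selftest(name, func) __demo_idx_##name,
enum { DEMO_SELFTEST_LIST(selftest) __demo_idx_count };
#undef selftest

/* Expansion 2: the table the runner walks. */
#define selftest(name, func) { .name = #name, .run = func },
static const struct demo_selftest demo_selftests[] = {
	DEMO_SELFTEST_LIST(selftest)
};
#undef selftest

int main(void)
{
	for (int i = 0; i < __demo_idx_count; i++)
		printf("%s: %d\n", demo_selftests[i].name,
		       demo_selftests[i].run());
	return 0;
}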
|
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/uc/ |
H A D | intel_guc_submission.c | 4105 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
|