Home
last modified time | relevance | path

Searched refs:timelines (Results 1 - 21 of 21) sorted by relevance

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
H A Dintel_timeline.c117 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_init_timelines() local
119 spin_lock_init(&timelines->lock); in intel_gt_init_timelines()
120 INIT_LIST_HEAD(&timelines->active_list); in intel_gt_init_timelines()
177 /* Borrow a nearby lock; we only create these timelines during init */ in intel_timeline_create_from_engine()
179 list_add_tail(&tl->engine_link, &engine->status_page.timelines); in intel_timeline_create_from_engine()
236 struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_enter() local
261 spin_lock(&timelines->lock); in intel_timeline_enter()
270 		list_add_tail(&tl->link, &timelines->active_list); in intel_timeline_enter()
277 	struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_exit() local
405 	struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_fini_timelines() local
417 	struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_show_timelines() local
[all...]
H A Dintel_gt_requests.c70 * Our goal here is to retire _idle_ timelines as soon as in engine_retire()
136 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_retire_requests_timeout() local
142 spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
143 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { in intel_gt_retire_requests_timeout()
152 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
178 out_active: spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
191 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
H A Dintel_engine_pm.c113 struct intel_gt_timelines *timelines = &engine->gt->timelines; in __queue_and_release_pm() local
135 spin_lock(&timelines->lock); in __queue_and_release_pm()
139 list_add_tail(&tl->link, &timelines->active_list); in __queue_and_release_pm()
147 spin_unlock(&timelines->lock); in __queue_and_release_pm()
190 * all timelines idle. in switch_to_kernel_context()
201 * list of active timelines looking for completions. Meanwhile as soon in switch_to_kernel_context()
203 * Ergo, if we put ourselves on the timelines.active_list in switch_to_kernel_context()
H A Dselftest_timeline.c167 * Create a bunch of timelines and check that their HWSP do not overlap. in mock_hwsp_freelist()
528 struct intel_timeline **timelines; in live_hwsp_engine() local
535 * Create a bunch of timelines and check we can write in live_hwsp_engine()
539 timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, in live_hwsp_engine()
540 sizeof(*timelines), in live_hwsp_engine()
542 if (!timelines) in live_hwsp_engine()
569 timelines[count++] = tl; in live_hwsp_engine()
582 struct intel_timeline *tl = timelines[n]; in live_hwsp_engine()
593 kvfree(timelines); in live_hwsp_engine()
602 struct intel_timeline **timelines; in live_hwsp_alternate() local
[all...]
H A Dintel_reset.c1040 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged() local
1063 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
1064 list_for_each_entry(tl, &timelines->active_list, link) { in __intel_gt_unset_wedged()
1071 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
1084 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
1085 tl = list_entry(&timelines->active_list, typeof(*tl), link); in __intel_gt_unset_wedged()
1087 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
H A Dintel_gt_types.h131 } timelines; member
H A Dintel_engine_types.h64 struct list_head timelines; member
554 * Keep track of completed timelines on this engine for early
H A Dintel_ring_submission.c279 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
H A Dintel_engine_cs.c1089 INIT_LIST_HEAD(&engine->status_page.timelines); in init_status_page()
1395 * Give our perma-pinned kernel timelines a separate lockdep class, in intel_engine_create_pinned_context()
1396 * so that we can use them from within the normal user timelines in intel_engine_create_pinned_context()
H A Dintel_execlists_submission.c448 * requests so that inter-timeline dependencies (i.e other timelines) in reset_active()
2853 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
H A Dintel_timeline.c51 struct intel_gt_timelines *gt = &timeline->gt->timelines; in hwsp_alloc()
278 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_init_timelines() local
280 spin_lock_init(&timelines->lock); in intel_gt_init_timelines()
281 INIT_LIST_HEAD(&timelines->active_list); in intel_gt_init_timelines()
283 spin_lock_init(&timelines->hwsp_lock); in intel_gt_init_timelines()
284 INIT_LIST_HEAD(&timelines->hwsp_free_list); in intel_gt_init_timelines()
371 struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_enter() local
396 	spin_lock(&timelines->lock); in intel_timeline_enter()
412 	struct intel_gt_timelines *timelines = &tl->gt->timelines; in intel_timeline_exit() local
620 	struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_fini_timelines() local
[all...]
H A Dintel_gt_requests.c70 * Our goal here is to retire _idle_ timelines as soon as in engine_retire()
135 struct intel_gt_timelines *timelines = &gt->timelines; in intel_gt_retire_requests_timeout() local
146 spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
147 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { in intel_gt_retire_requests_timeout()
156 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
182 out_active: spin_lock(&timelines->lock); in intel_gt_retire_requests_timeout()
195 spin_unlock(&timelines->lock); in intel_gt_retire_requests_timeout()
H A Dintel_engine_pm.c106 struct intel_gt_timelines *timelines = &engine->gt->timelines; in __queue_and_release_pm() local
119 spin_lock(&timelines->lock); in __queue_and_release_pm()
123 list_add_tail(&tl->link, &timelines->active_list); in __queue_and_release_pm()
131 spin_unlock(&timelines->lock); in __queue_and_release_pm()
162 * all timelines idle. in switch_to_kernel_context()
173 * list of active timelines looking for completions. Meanwhile as soon in switch_to_kernel_context()
175 * Ergo, if we put ourselves on the timelines.active_list in switch_to_kernel_context()
H A Dselftest_timeline.c135 * Create a bunch of timelines and check that their HWSP do not overlap. in mock_hwsp_freelist()
508 struct intel_timeline **timelines; in live_hwsp_engine() local
515 * Create a bunch of timelines and check we can write in live_hwsp_engine()
519 timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, in live_hwsp_engine()
520 sizeof(*timelines), in live_hwsp_engine()
522 if (!timelines) in live_hwsp_engine()
549 timelines[count++] = tl; in live_hwsp_engine()
562 struct intel_timeline *tl = timelines[n]; in live_hwsp_engine()
573 kvfree(timelines); in live_hwsp_engine()
582 struct intel_timeline **timelines; in live_hwsp_alternate() local
[all...]
H A Dintel_reset.c898 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged() local
921 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
922 list_for_each_entry(tl, &timelines->active_list, link) { in __intel_gt_unset_wedged()
929 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
942 spin_lock(&timelines->lock); in __intel_gt_unset_wedged()
943 tl = list_entry(&timelines->active_list, typeof(*tl), link); in __intel_gt_unset_wedged()
945 spin_unlock(&timelines->lock); in __intel_gt_unset_wedged()
H A Dintel_gt_types.h45 /* Pack multiple timelines' seqnos into the same page */
48 } timelines; member
/kernel/linux/linux-5.10/tools/testing/selftests/sync/
H A Dsync_stress_merge.c41 int timelines[timeline_count]; in test_merge_stress_random_merge() local
49 timelines[i] = sw_sync_timeline_create(); in test_merge_stress_random_merge()
51 fence = sw_sync_fence_create(timelines[0], "fence", 0); in test_merge_stress_random_merge()
59 * Randomly create sync_points out of a fixed set of timelines, in test_merge_stress_random_merge()
65 timeline = timelines[timeline_offset]; in test_merge_stress_random_merge()
101 sw_sync_timeline_inc(timelines[i], fence_map[i]); in test_merge_stress_random_merge()
112 sw_sync_timeline_destroy(timelines[i]); in test_merge_stress_random_merge()
/kernel/linux/linux-6.6/tools/testing/selftests/sync/
H A Dsync_stress_merge.c41 int timelines[timeline_count]; in test_merge_stress_random_merge() local
49 timelines[i] = sw_sync_timeline_create(); in test_merge_stress_random_merge()
51 fence = sw_sync_fence_create(timelines[0], "fence", 0); in test_merge_stress_random_merge()
59 * Randomly create sync_points out of a fixed set of timelines, in test_merge_stress_random_merge()
65 timeline = timelines[timeline_offset]; in test_merge_stress_random_merge()
101 sw_sync_timeline_inc(timelines[i], fence_map[i]); in test_merge_stress_random_merge()
112 sw_sync_timeline_destroy(timelines[i]); in test_merge_stress_random_merge()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
H A Di915_mock_selftests.h26 selftest(timelines, intel_timeline_mock_selftests)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/
H A Di915_mock_selftests.h26 selftest(timelines, intel_timeline_mock_selftests)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/uc/
H A Dintel_guc_submission.c4105 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()

Completed in 29 milliseconds