/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_gt_pm.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {                                     \
        const struct intel_context *ce__ = (ce);                        \
        ENGINE_TRACE(ce__->engine, "context:%llx " fmt,                 \
                     ce__->timeline->fence_context,                     \
                     ##__VA_ARGS__);                                    \
} while (0)

#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
                        struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
                                   const struct intel_sseu sseu);

#define PARENT_SCRATCH_SIZE PAGE_SIZE

static inline bool intel_context_is_child(struct intel_context *ce)
{
        return !!ce->parallel.parent;
}

static inline bool intel_context_is_parent(struct intel_context *ce)
{
        return !!ce->parallel.number_children;
}

static inline bool intel_context_is_pinned(struct intel_context *ce);

static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
        if (intel_context_is_child(ce)) {
                /*
                 * The parent holds a ref count on the child, so it is always
                 * safe for the parent to access the child, but the child has
                 * a pointer to the parent without a ref. To ensure this is
                 * safe, the child should only access the parent pointer while
                 * the parent is pinned.
                 */
                GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));

                return ce->parallel.parent;
        } else {
                return ce;
        }
}

static inline bool intel_context_is_parallel(struct intel_context *ce)
{
        return intel_context_is_child(ce) || intel_context_is_parent(ce);
}

void intel_context_bind_parent_child(struct intel_context *parent,
                                     struct intel_context *child);

#define for_each_child(parent, ce)\
        list_for_each_entry(ce, &(parent)->parallel.child_list,\
                            parallel.child_link)
#define for_each_child_safe(parent, ce, cn)\
        list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
                                 parallel.child_link)
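
/*
 * Usage sketch (illustrative comment only, not compiled): walking the
 * children of a parallel submission from its parent and resolving a child
 * back to that parent. It assumes @parent is a parent context that the
 * caller keeps pinned, since the child -> parent pointer is only safe to
 * follow while the parent is pinned (see intel_context_to_parent()).
 *
 *      struct intel_context *child;
 *
 *      for_each_child(parent, child)
 *              GEM_BUG_ON(intel_context_to_parent(child) != parent);
 */
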
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
        __acquires(ce->pin_mutex)
{
        return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
        return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
                                                struct i915_request *rq)
{
        GEM_BUG_ON(!ce->ops->cancel_request);
        return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
        __releases(ce->pin_mutex)
{
        mutex_unlock(&ce->pin_mutex);
}
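
/*
 * Usage sketch (illustrative comment only, not compiled): taking
 * ce->pin_mutex around a query so that the pinned status cannot change
 * while the caller inspects state that only exists for a pinned context.
 * Error handling beyond the lock itself is trimmed.
 *
 *      int err;
 *
 *      err = intel_context_lock_pinned(ce);
 *      if (err)
 *              return err;
 *
 *      if (intel_context_is_pinned(ce))
 *              ... read state that is only valid while pinned ...
 *
 *      intel_context_unlock_pinned(ce);
 */
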
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
                              struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
        return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
        if (likely(intel_context_pin_if_active(ce)))
                return 0;

        return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
                                       struct i915_gem_ww_ctx *ww)
{
        if (likely(intel_context_pin_if_active(ce)))
                return 0;

        return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
        GEM_BUG_ON(!intel_context_is_pinned(ce));
        atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
        __intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
        if (!ce->ops->sched_disable) {
                __intel_context_do_unpin(ce, 1);
        } else {
                /*
                 * Move ownership of this pin to the scheduling disable, which
                 * is an async operation. When that operation completes, the
                 * above intel_context_sched_disable_unpin is called,
                 * potentially unpinning the context.
                 */
                while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
                        if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
                                ce->ops->sched_disable(ce);
                                break;
                        }
                }
        }
}
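
/*
 * Usage sketch (illustrative comment only, not compiled): the usual
 * pin/use/unpin pattern. Only the first pin takes the slow path through
 * __intel_context_do_pin(); later callers succeed via the atomic
 * intel_context_pin_if_active() fast path, and the final unpin either
 * drops the pin directly or hands it to the backend's async
 * sched_disable, as described above.
 *
 *      int err;
 *
 *      err = intel_context_pin(ce);
 *      if (err)
 *              return err;
 *
 *      ... submit work on ce ...
 *
 *      intel_context_unpin(ce);
 */
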
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
        lockdep_assert_held(&ce->timeline->mutex);
        if (ce->active_count++)
                return;

        ce->ops->enter(ce);
        intel_gt_pm_get(ce->vm->gt);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
        lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
                       test_bit(CONTEXT_IS_PARKING, &ce->flags));
        ++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
        lockdep_assert_held(&ce->timeline->mutex);
        GEM_BUG_ON(!ce->active_count);
        if (--ce->active_count)
                return;

        intel_gt_pm_put_async(ce->vm->gt);
        ce->ops->exit(ce);
}

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
        kref_get(&ce->ref);
        return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
        kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
        __acquires(&ce->timeline->mutex)
{
        struct intel_timeline *tl = ce->timeline;
        int err;

        if (intel_context_is_parent(ce))
                err = mutex_lock_interruptible_nested(&tl->mutex, 0);
        else if (intel_context_is_child(ce))
                err = mutex_lock_interruptible_nested(&tl->mutex,
                                                      ce->parallel.child_index + 1);
        else
                err = mutex_lock_interruptible(&tl->mutex);
        if (err)
                return ERR_PTR(err);

        return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
        __releases(&tl->mutex)
{
        mutex_unlock(&tl->mutex);
}
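
/*
 * Usage sketch (illustrative comment only, not compiled): the active count
 * above is only manipulated under the context's timeline mutex, which
 * intel_context_timeline_lock() takes with the nesting annotations needed
 * for parent/child contexts. A request-creation style sequence looks like:
 *
 *      struct intel_timeline *tl;
 *
 *      tl = intel_context_timeline_lock(ce);
 *      if (IS_ERR(tl))
 *              return PTR_ERR(tl);
 *
 *      intel_context_enter(ce);
 *      ... build the request / queue work against ce ...
 *      intel_context_exit(ce);
 *
 *      intel_context_timeline_unlock(tl);
 */
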
int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *intel_context_get_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
        return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline void intel_context_close(struct intel_context *ce)
{
        set_bit(CONTEXT_CLOSED_BIT, &ce->flags);

        if (ce->ops->close)
                ce->ops->close(ce);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
        return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
        return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
        return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
        set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
        clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
        return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
        return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);

static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
        return !test_bit(CONTEXT_EXITING, &ce->flags) &&
               !test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_is_exiting(const struct intel_context *ce)
{
        return test_bit(CONTEXT_EXITING, &ce->flags);
}

static inline bool intel_context_set_exiting(struct intel_context *ce)
{
        return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}

bool intel_context_revoke(struct intel_context *ce);
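
/*
 * Usage sketch (illustrative comment only, not compiled): submission paths
 * can use intel_context_is_schedulable() instead of testing the individual
 * CONTEXT_BANNED / CONTEXT_EXITING bits to decide whether new work may
 * still be placed on a context.
 *
 *      if (!intel_context_is_schedulable(ce))
 *              return -EIO;
 *
 *      ... go on to build and submit the request ...
 */
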
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
        return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
        __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
        return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
        set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
        clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);

static inline u64 intel_context_clock(void)
{
        /* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
        return ktime_get_raw_fast_ns();
}

#endif /* __INTEL_CONTEXT_H__ */