/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
i915_gem_ww.c
      9  void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
     11          ww_acquire_init(&ww->ctx, &reservation_ww_class);
     12          INIT_LIST_HEAD(&ww->obj_list);
     13          ww->intr = intr;
     14          ww->contended = NULL;
     17  static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
     21          while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
     35  void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
     37          i915_gem_ww_ctx_unlock_all(ww);
     38          WARN_ON(ww->contended);
     42  i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
     [all...]
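Together these helpers form i915's wound/wait transaction: initialize a context, collect object locks on ww->obj_list, back off on -EDEADLK, and fini to drop everything. A minimal sketch of the caller-side retry loop those hits imply, assuming two pre-existing objects (obj_a and obj_b are hypothetical names):

    static int lock_pair_of_objects(struct drm_i915_gem_object *obj_a,
                                    struct drm_i915_gem_object *obj_b)
    {
            struct i915_gem_ww_ctx ww;
            int err;

            i915_gem_ww_ctx_init(&ww, true);        /* true = interruptible waits */
    retry:
            err = i915_gem_object_lock(obj_a, &ww);
            if (!err)
                    err = i915_gem_object_lock(obj_b, &ww);
            if (err == -EDEADLK) {
                    /* Drops every lock on ww.obj_list, then sleeps on the
                     * contended object recorded in ww.contended. */
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;
            }
            /* ... on success both objects are locked here ... */
            i915_gem_ww_ctx_fini(&ww);              /* unlocks whatever is still held */
            return err;
    }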
i915_gem_evict.c
     67  static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
     74          if (!i915_gem_object_trylock(vma->obj, ww)) {
     96  mark_free(struct drm_mm_scan *scan, struct i915_gem_ww_ctx *ww, struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
    105          if (!grab_vma(vma, ww))
    126   * @ww: An optional struct i915_gem_ww_ctx.
    148  i915_gem_evict_something(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, u64 min_size, u64 alignment, unsigned long color, u64 start, u64 end, unsigned flags)
    230          if (mark_free(&scan, ww, vma, flags, &eviction_list))
    305              grab_vma(vma, ww)) {
    319   * @ww: An optional struct i915_gem_ww_ctx.
    328  i915_gem_evict_for_node(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, struct drm_mm_node *target, unsigned int flags)
    458  i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object **busy_bo)
    [all...]
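grab_vma() and mark_free() show why the eviction paths take a ww context but only ever trylock: an eviction scan runs while the caller may already hold other object locks, so sleeping on a victim's lock could deadlock the very transaction that triggered eviction. A hedged sketch of that idea (try_grab is a hypothetical stand-in for grab_vma's core):

    /* Opportunistically lock an eviction candidate under the caller's
     * transaction; contended objects are skipped, never waited on.
     * Note the driver pairs each successful trylock with an explicit
     * unlock after the scan; trylocked victims are not auto-released
     * by the transaction teardown. */
    static bool try_grab(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
    {
            if (!i915_gem_object_trylock(vma->obj, ww))
                    return false;   /* busy elsewhere: leave this vma alone */
            return true;
    }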
i915_gem_ww.h
     23  static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err)
     26          err = i915_gem_ww_ctx_backoff(ww);
     32          i915_gem_ww_ctx_fini(ww);
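__i915_gem_ww_fini() is the step expression behind the for_i915_gem_ww() iterator that appears in many hits below. Paraphrasing the pattern this header implements (not guaranteed to be the verbatim macro body):

    /* Run the loop body until it completes without -EDEADLK; on
     * contention the fini helper backs off (re-arming the loop),
     * otherwise it tears the context down and the loop exits. */
    #define for_i915_gem_ww(_ww, _err, _intr)                               \
            for (i915_gem_ww_ctx_init(_ww, _intr), (_err) = -EDEADLK;       \
                 (_err) == -EDEADLK;                                        \
                 (_err) = __i915_gem_ww_fini(_ww, _err))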
/kernel/linux/linux-5.10/drivers/staging/vt6655/ |
mac.c
    255          unsigned short ww;                              (local in MACbSoftwareReset())
    260          for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    264          if (ww == W_MAX_TIMEOUT)
    318          unsigned short ww;                              (local in MACbSafeRxOff())
    325          for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    329          if (ww == W_MAX_TIMEOUT)
    372          unsigned short ww;                              (local in MACbSafeTxOff())
    523          unsigned short ww;                              (local in MACvSetCurrRx0DescAddr())
    557          unsigned short ww;                              (local in MACvSetCurrRx1DescAddr())
    592          unsigned short ww;                              (local in MACvSetCurrTx0DescAddrEx())
    628          unsigned short ww;                              (local in MACvSetCurrAC0DescAddrEx())
    730          unsigned int ww;                                (local in MACbPSWakeup())
    [all...]
/kernel/linux/linux-6.6/drivers/staging/vt6655/ |
mac.c
    268          unsigned short ww;                              (local in MACbSoftwareReset())
    273          for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    277          if (ww == W_MAX_TIMEOUT)
    328          unsigned short ww;                              (local in vt6655_mac_safe_rx_off())
    335          for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    339          if (ww == W_MAX_TIMEOUT)
    382          unsigned short ww;                              (local in vt6655_mac_safe_tx_off())
    533          unsigned short ww;                              (local in vt6655_mac_set_curr_rx_0_desc_addr())
    567          unsigned short ww;                              (local in vt6655_mac_set_curr_rx_1_desc_addr())
    601          unsigned short ww;                              (local in vt6655_mac_set_curr_tx_0_desc_addr_ex())
    636          unsigned short ww;                              (local in vt6655_mac_set_curr_ac_0_desc_addr_ex())
    737          unsigned int ww;                                (local in MACbPSWakeup())
    [all...]
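Note that in both vt6655 copies `ww` has nothing to do with wound/wait mutexes: it is a plain counter bounding a register poll. The recurring idiom, sketched with hypothetical register and bit names (STATUS_REG, READY_BIT); W_MAX_TIMEOUT is the driver's real bound, its value elided here:

    static bool wait_for_ready(void __iomem *iobase)
    {
            unsigned short ww;

            for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                    if (ioread8(iobase + STATUS_REG) & READY_BIT)
                            break;          /* device signalled completion */
            }
            return ww < W_MAX_TIMEOUT;      /* false: hardware never settled */
    }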
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
intel_context.c
    103  static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
    108          err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
    137  __ring_active(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
    142          err = intel_ring_pin(ring, ww);
    163  intel_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww)
    170          err = __ring_active(ce->ring, ww);
    174          err = intel_timeline_pin(ce->timeline, ww);
    181          err = __context_pin_state(ce->state, ww);
    204  __intel_context_do_pin_ww(struct intel_context *ce, struct i915_gem_ww_ctx *ww)
    223          err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
    295          struct i915_gem_ww_ctx ww;                      (local in __intel_context_do_pin())
    468          struct i915_gem_ww_ctx ww;                      (local in intel_context_create_request())
    [all...]
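The locals at 295 and 468 mark the two places this file opens its own transaction rather than borrowing a caller's. A sketch of the usual wrapper shape, consistent with the backoff pattern used throughout this listing (the body of __intel_context_do_pin_ww() is elided):

    int __intel_context_do_pin(struct intel_context *ce)
    {
            struct i915_gem_ww_ctx ww;
            int err;

            i915_gem_ww_ctx_init(&ww, true);
    retry:
            err = __intel_context_do_pin_ww(ce, &ww);
            if (err == -EDEADLK) {
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;
            }
            i915_gem_ww_ctx_fini(&ww);
            return err;
    }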
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/ |
i915_gem_object_blt.c
     15  intel_emit_vma_fill_blt(struct intel_context *ce, struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u32 value)
     43          err = i915_gem_object_lock(pool->obj, ww);
     53          err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
    149          struct i915_gem_ww_ctx ww;                      (local in i915_gem_object_fill_blt())
    159          i915_gem_ww_ctx_init(&ww, true);
    162          err = i915_gem_object_lock(obj, &ww);
    166          err = intel_context_pin_ww(ce, &ww);
    170          err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
    174          batch = intel_emit_vma_fill_blt(ce, vma, &ww, value);
    217          err = i915_gem_ww_ctx_backoff(&ww);
    237  intel_emit_vma_copy_blt(struct intel_context *ce, struct i915_gem_ww_ctx *ww, struct i915_vma *src, struct i915_vma *dst)
    357          struct i915_gem_ww_ctx ww;                      (local in i915_gem_object_copy_blt())
    [all...]
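One detail worth calling out in i915_gem_object_fill_blt(): backoff only releases dma-resv locks, not pins, so the error path must unpin in reverse order before the -EDEADLK retry. A hedged sketch of that unwind shape (the labels are hypothetical):

    out_unpin:
            i915_vma_unpin(vma);            /* undo i915_vma_pin_ww() */
    out_ctx:
            intel_context_unpin(ce);        /* undo intel_context_pin_ww() */
    out_ww:
            if (err == -EDEADLK) {
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;     /* locks and pins redone from scratch */
            }
            i915_gem_ww_ctx_fini(&ww);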
i915_gem_client_blt.c
    161          struct i915_gem_ww_ctx ww;                      (local in clear_pages_worker())
    177          i915_gem_ww_ctx_init(&ww, false);
    180          err = intel_context_pin_ww(w->ce, &ww);
    184          batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
    236          err = i915_gem_ww_ctx_backoff(&ww);
    240          i915_gem_ww_ctx_fini(&ww);
    256          struct i915_gem_ww_ctx ww;                      (local in pin_wait_clear_pages_work())
    259          i915_gem_ww_ctx_init(&ww, false);
    261          err = i915_gem_object_lock(vma->obj, &ww);
    265          err = i915_vma_pin_ww(vma, &ww, ...
    [all...]
i915_gem_object.h
    113  __i915_gem_object_lock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, bool intr)
    120          ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
    122          ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
    124          if (!ret && ww)
    125                  list_add_tail(&obj->obj_link, &ww->obj_list);
    130          ww->contended = obj;
    135  i915_gem_object_lock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww)
    138          return __i915_gem_object_lock(obj, ww, ww && ww->intr);
    141  i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww)
    [all...]
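The `ww->contended = obj` store at line 130 is the other half of the backoff contract: after every lock is dropped, the contended object is re-acquired with dma-resv's slow path, which waits instead of failing. A sketch consistent with the fields shown here (not the verbatim function body):

    int i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
    {
            struct drm_i915_gem_object *obj = ww->contended;
            int err = 0;

            if (WARN_ON(!obj))
                    return -EINVAL;

            i915_gem_ww_ctx_unlock_all(ww);         /* drop everything first */
            if (ww->intr)
                    err = dma_resv_lock_slow_interruptible(obj->base.resv, &ww->ctx);
            else
                    dma_resv_lock_slow(obj->base.resv, &ww->ctx);
            if (!err)
                    list_add_tail(&obj->obj_link, &ww->obj_list);
            ww->contended = NULL;
            return err;
    }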
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/ |
intel_context.c
    102  static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
    107          err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
    136  __ring_active(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
    141          err = intel_ring_pin(ring, ww);
    162  intel_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww)
    169          err = __ring_active(ce->ring, ww);
    173          err = intel_timeline_pin(ce->timeline, ww);
    180          err = __context_pin_state(ce->state, ww);
    203  __intel_context_do_pin_ww(struct intel_context *ce, struct i915_gem_ww_ctx *ww)
    222          err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
    298          struct i915_gem_ww_ctx ww;                      (local in __intel_context_do_pin())
    495          struct i915_gem_ww_ctx ww;                      (local in intel_context_create_request())
    [all...]
selftest_migrate.c
     36  copy(struct intel_migrate *migrate, int (*fn)(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *src, struct drm_i915_gem_object *dst, struct i915_request **out), u32 sz, struct rnd_state *prng)
     47          struct i915_gem_ww_ctx ww;                      (local in copy())
     61          for_i915_gem_ww(&ww, err, true) {
     62                  err = i915_gem_object_lock(src, &ww);
     66                  err = i915_gem_object_lock(dst, &ww);
     90                  err = fn(migrate, &ww, src, dst, &rq);
    222  intel_migrate_ccs_copy(struct intel_migrate *m, struct i915_gem_ww_ctx *ww, const struct i915_deps *deps, struct scatterlist *sg, unsigned int pat_index, bool write_to_ccs, struct i915_request **out)
    242          err = intel_context_pin_ww(ce, ww);
    255  clear(struct intel_migrate *migrate, int (*fn)(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, u32 value, struct i915_request **out), u32 sz, struct rnd_state *prng)
    266          struct i915_gem_ww_ctx ww;                      (local in clear())
    410  __migrate_copy(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *src, struct drm_i915_gem_object *dst, struct i915_request **out)
    424  __global_copy(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *src, struct drm_i915_gem_object *dst, struct i915_request **out)
    450  __migrate_clear(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, u32 value, struct i915_request **out)
    463  __global_clear(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, u32 value, struct i915_request **out)
    [all...]
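copy() is the clearest user of the iterator: both buffers are locked inside one transaction, so whatever order contention forces, the worst case is a backoff and another pass rather than a stuck AB-BA pair. Schematically (do_work() is a hypothetical stand-in for the fn callback):

    for_i915_gem_ww(&ww, err, true) {
            err = i915_gem_object_lock(src, &ww);
            if (err)
                    continue;       /* -EDEADLK re-runs the loop; else exits */
            err = i915_gem_object_lock(dst, &ww);
            if (err)
                    continue;
            err = do_work(migrate, &ww, src, dst, &rq);
    }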
intel_renderstate.c
    167          i915_gem_ww_ctx_init(&so->ww, true);            (in intel_renderstate_init())
    169          err = intel_context_pin_ww(ce, &so->ww);
    177          err = i915_gem_object_lock(so->vma->obj, &so->ww);
    181          err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
    197          err = i915_gem_ww_ctx_backoff(&so->ww);
    201          i915_gem_ww_ctx_fini(&so->ww);
    248          i915_gem_ww_ctx_fini(&so->ww);                  (in intel_renderstate_fini())
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/display/ |
intel_fb_pin.c
     29          struct i915_gem_ww_ctx ww;                      (local in intel_pin_fb_obj_dpt())
     47          for_i915_gem_ww(&ww, ret, true) {
     48                  ret = i915_gem_object_lock(obj, &ww);
     63                  ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0, ...
     85                  ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
    116          struct i915_gem_ww_ctx ww;                      (local in intel_pin_and_fence_fb_obj())
    163          i915_gem_ww_ctx_init(&ww, true);
    165          ret = i915_gem_object_lock(obj, &ww);
    169          ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
    175          vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, ...
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/ |
i915_gem_dmabuf.c
    122          struct i915_gem_ww_ctx ww;                      (local in i915_gem_begin_cpu_access())
    125          i915_gem_ww_ctx_init(&ww, true);
    127          err = i915_gem_object_lock(obj, &ww);
    135          err = i915_gem_ww_ctx_backoff(&ww);
    139          i915_gem_ww_ctx_fini(&ww);
    146          struct i915_gem_ww_ctx ww;                      (local in i915_gem_end_cpu_access())
    149          i915_gem_ww_ctx_init(&ww, true);
    151          err = i915_gem_object_lock(obj, &ww);
    159          err = i915_gem_ww_ctx_backoff(&ww);
    163          i915_gem_ww_ctx_fini(&ww);
    171          struct i915_gem_ww_ctx ww;                      (local in i915_gem_dmabuf_attach())
    [all...]
i915_gem_object.h
    164  __i915_gem_object_lock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, bool intr)
    171          ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
    173          ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
    175          if (!ret && ww) {
    177                  list_add_tail(&obj->obj_link, &ww->obj_list);
    184          ww->contended = obj;
    190  i915_gem_object_lock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww)
    193          return __i915_gem_object_lock(obj, ww, ww && ww->intr);
    196  i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww)
    203  i915_gem_object_trylock(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww)
    [all...]
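New relative to the 5.10 header is the trylock at line 203. Its shape, reconstructed from the dma-resv and ww_mutex primitives it wraps (a sketch, not guaranteed verbatim):

    static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
                                               struct i915_gem_ww_ctx *ww)
    {
            if (!ww)
                    return dma_resv_trylock(obj->base.resv);
            else
                    return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
    }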
i915_gem_region.c
    163           * In the future, a non-NULL apply->ww could mean the caller is
    166          GEM_WARN_ON(apply->ww);
    171          struct i915_gem_ww_ctx ww;                      (local in i915_gem_process_region())
    188          apply->ww = &ww;
    189          for_i915_gem_ww(&ww, ret, apply->interruptible) {
    190                  ret = i915_gem_object_lock(obj, apply->ww);
/kernel/linux/linux-6.6/kernel/locking/ |
ww_mutex.h
    177   * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
    181  ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
    190          DEBUG_LOCKS_WARN_ON(ww->ctx);
    202          DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
    215          DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
    218          ww->ctx = ww_ctx;
    392           * [W] ww->ctx = ctx            [W] MUTEX_FLAG_WAITERS      (in ww_mutex_set_context_fastpath())
    394           * [R] MUTEX_FLAG_WAITERS       [R] ww->ctx
    397           * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
    417          struct ww_mutex *ww;                            (local in __ww_mutex_kill())
    444          struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);       (in __ww_mutex_check_kill())
    545          struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);       (in __ww_mutex_add_waiter())
    [all...]
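These internals back the generic API documented in Documentation/locking/ww-mutex-design.rst. A minimal two-lock transaction over that public API (demo_class and lock_pair() are hypothetical):

    static DEFINE_WW_CLASS(demo_class);

    static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
    {
            struct ww_acquire_ctx ctx;

            ww_acquire_init(&ctx, &demo_class);
            ww_mutex_lock(a, &ctx);         /* first lock of a fresh ctx never dies */
            while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
                    ww_mutex_unlock(a);             /* we lost: drop what we hold */
                    ww_mutex_lock_slow(b, &ctx);    /* sleep until b is ours */
                    swap(a, b);                     /* b is now the lock left to take */
            }
            ww_acquire_done(&ctx);          /* optional: no further locks */
            /* ... both held: critical section ... */
            ww_mutex_unlock(a);
            ww_mutex_unlock(b);
            ww_acquire_fini(&ctx);
    }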
mutex.c
    307          struct ww_mutex *ww;                            (local in ww_mutex_spin_on_owner())
    309          ww = container_of(lock, struct ww_mutex, base);
    312           * If ww->ctx is set the contents are undefined, only
    322          if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
    574          struct ww_mutex *ww;                            (local in __mutex_lock_common())
    584          ww = container_of(lock, struct ww_mutex, base);
    586          if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
    611          ww_mutex_set_context_fastpath(ww, ww_ctx);
    725          ww_mutex_lock_acquired(ww, ww_ctx);
    759   * @ww ...
    770  ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
    [all...]
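ww_mutex_trylock() (hit at 770) is the primitive the i915 trylock above builds on: it never sleeps and never returns -EDEADLK, it just fails, which is what makes opportunistic patterns like the eviction scan safe. Sketch (victim, shrink() and the surrounding ctx are hypothetical):

    if (ww_mutex_trylock(&victim->lock, &ctx)) {
            /* Success: the lock is now owned by ctx like any other acquire. */
            shrink(victim);
            ww_mutex_unlock(&victim->lock);
    } else {
            /* Contended: skip this victim instead of risking a wait. */
    }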
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/ |
i915_gem_migrate.c
     53          struct i915_gem_ww_ctx ww;                      (local in igt_create_migrate())
     64          for_i915_gem_ww(&ww, err, true) {
     65                  err = i915_gem_object_lock(obj, &ww);
     73                  err = i915_gem_object_migrate(obj, &ww, dst);
    111  static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww, ...
    118          err = i915_gem_object_lock(obj, ww);
    123          err = i915_vma_pin_ww(vma, ww, obj->base.size, 0, ...
    141          err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
    160          err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
    192          struct i915_gem_ww_ctx ww;                      (local in __igt_lmem_pages_migrate())
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/ |
i915_gem.c
    206          struct i915_gem_ww_ctx ww;                      (local in igt_gem_ww_ctx())
    219          i915_gem_ww_ctx_init(&ww, true);
    222          err = i915_gem_object_lock(obj, &ww);
    224          err = i915_gem_object_lock_interruptible(obj, &ww);
    226          err = i915_gem_object_lock_interruptible(obj2, &ww);
    228          err = i915_gem_object_lock(obj2, &ww);
    231          err = i915_gem_ww_ctx_backoff(&ww);
    235          i915_gem_ww_ctx_fini(&ww);
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/ |
igt_spinner.c
     42  igt_spinner_pin_obj(struct intel_context *ce, struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, unsigned int mode, struct i915_vma **vma)
     54          ret = i915_gem_object_lock(obj, ww);
     60          if (!ww)
     66          if (ww)
     67                  ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
     79  igt_spinner_pin(struct igt_spinner *spin, struct intel_context *ce, struct i915_gem_ww_ctx *ww)
     90          vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
    101          vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
i915_gem.c
    213          struct i915_gem_ww_ctx ww;                      (local in igt_gem_ww_ctx())
    226          i915_gem_ww_ctx_init(&ww, true);
    229          err = i915_gem_object_lock(obj, &ww);
    231          err = i915_gem_object_lock_interruptible(obj, &ww);
    233          err = i915_gem_object_lock_interruptible(obj2, &ww);
    235          err = i915_gem_object_lock(obj2, &ww);
    238          err = i915_gem_ww_ctx_backoff(&ww);
    242          i915_gem_ww_ctx_fini(&ww);
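The test deliberately locks each object twice within one transaction. That works because dma_resv_lock() reports a relock by the same acquire context as -EALREADY, which the i915 wrapper treats as success; a sketch of the relevant check (compare the 6.6 i915_gem_object.h hits above):

    ret = dma_resv_lock_interruptible(obj->base.resv, &ww->ctx);
    if (ret == -EALREADY)
            ret = 0;        /* already held by this transaction: fine */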
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/ |
i915_gem_execbuffer.c
     35          err = i915_gem_object_lock(obj, &eb->ww);               (in __igt_gpu_reloc())
     39          err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
    140          i915_gem_ww_ctx_init(&eb.ww, false);                    (in igt_gpu_reloc())
    142          err = intel_context_pin_ww(eb.context, &eb.ww);
    149          err = i915_gem_ww_ctx_backoff(&eb.ww);
    153          i915_gem_ww_ctx_fini(&eb.ww);
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/uc/ |
intel_gsc_uc_heci_cmd_submit.c
    133          struct i915_gem_ww_ctx ww;                      (local in intel_gsc_uc_heci_cmd_submit_nonpriv())
    137          i915_gem_ww_ctx_init(&ww, false);
    139          err = i915_gem_object_lock(pkt->bb_vma->obj, &ww);
    142          err = i915_gem_object_lock(pkt->heci_pkt_vma->obj, &ww);
    145          err = intel_context_pin_ww(ce, &ww);
    200          err = i915_gem_ww_ctx_backoff(&ww);
    208          i915_gem_ww_ctx_fini(&ww);
/kernel/linux/linux-5.10/kernel/locking/ |
mutex.c
    312   * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
    316  ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
    325          DEBUG_LOCKS_WARN_ON(ww->ctx);
    337          DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
    350          DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
    353          ww->ctx = ww_ctx;
    489           * [W] ww->ctx = ctx            [W] MUTEX_FLAG_WAITERS      (in ww_mutex_set_context_fastpath())
    491           * [R] MUTEX_FLAG_WAITERS       [R] ww->ctx
    494           * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
    515          struct ww_mutex *ww;                            (local in ww_mutex_spin_on_owner())
    790          struct ww_mutex *ww;                            (local in __ww_mutex_kill())
    818          struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);       (in __ww_mutex_check_kill())
    918          struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);       (in __ww_mutex_add_waiter())
    941          struct ww_mutex *ww;                            (local in __mutex_lock_common())
    [all...]
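The 5.10 tree still carries all of the ww machinery inside mutex.c; later kernels split it into kernel/locking/ww_mutex.h, as the 6.6 entry above shows. The acquire-context lifecycle that the DEBUG_LOCKS_WARN_ON checks police, in outline (demo_class is hypothetical):

    struct ww_acquire_ctx ctx;

    ww_acquire_init(&ctx, &demo_class);     /* stamps ctx with an age for wound/wait */
    /* ... acquire any number of ww_mutexes under &ctx, retrying on -EDEADLK ... */
    ww_acquire_done(&ctx);                  /* optional: no further locks from here on */
    /* ... critical section over the whole locked set ... */
    /* ... ww_mutex_unlock() each held lock ... */
    ww_acquire_fini(&ctx);                  /* ctx must hold no locks by now */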