// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence-chain.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/* Determine based on the owner and mode if we should sync to a fence or not */
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
				   enum amdgpu_sync_mode mode,
				   void *owner, struct dma_fence *f)
{
	void *fence_owner = amdgpu_sync_get_owner(f);

	/* Always sync to moves, no matter what */
	if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
		return true;

	/* We only want to trigger KFD eviction fences on
	 * evict or move jobs. Skip KFD fences otherwise.
	 */
	if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
		return false;

	/* Never sync to VM updates either. */
	if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
	    owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
	    owner != AMDGPU_FENCE_OWNER_KFD)
		return false;

	/* Ignore fences depending on the sync mode */
	switch (mode) {
	case AMDGPU_SYNC_ALWAYS:
		return true;

	case AMDGPU_SYNC_NE_OWNER:
		if (amdgpu_sync_same_dev(adev, f) &&
		    fence_owner == owner)
			return false;
		break;

	case AMDGPU_SYNC_EQ_OWNER:
		if (amdgpu_sync_same_dev(adev, f) &&
		    fence_owner != owner)
			return false;
		break;

	case AMDGPU_SYNC_EXPLICIT:
		return false;
	}

	WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
	     "Adding eviction fence to sync obj");
	return true;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int r;

	if (resv == NULL)
		return -EINVAL;

	/* TODO: Use DMA_RESV_USAGE_READ here */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
		dma_fence_chain_for_each(f, f) {
			struct dma_fence *tmp = dma_fence_chain_contained(f);

			if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
				r = amdgpu_sync_fence(sync, f);
				dma_fence_put(f);
				if (r)
					return r;
				break;
			}
		}
	}
	return 0;
}

/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
	hash_del(&e->node);
	dma_fence_put(e->fence);
	kmem_cache_free(amdgpu_sync_slab, e);
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			amdgpu_sync_entry_free(e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f);
			if (r)
				return r;
		} else {
			amdgpu_sync_entry_free(e);
		}
	}

	return 0;
}

/**
 * amdgpu_sync_push_to_job - push fences into job
 * @sync: sync object to get the fences from
 * @job: job to push the fences into
 *
 * Add all unsignaled fences from sync to job.
 */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (dma_fence_is_signaled(f)) {
			amdgpu_sync_entry_free(e);
			continue;
		}

		dma_fence_get(f);
		r = drm_sched_job_add_dependency(&job->base, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true the wait is interruptible
 *
 * Wait for every fence in the sync object to signal, freeing each entry once
 * it has completed. Returns 0 on success or a negative error code if a wait
 * fails.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		amdgpu_sync_entry_free(e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node)
		amdgpu_sync_entry_free(e);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}