Lines matching refs:cache
48 radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
50 if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
53 mtx_lock(&cache->mutex);
57 radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
59 if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
62 mtx_unlock(&cache->mutex);
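
All of the hits in this listing come from RADV's in-memory pipeline cache (apparently radv_pipeline_cache.c in Mesa): an open-addressed hash table keyed by a SHA-1 of the pipeline state. The first group (lines 48-62) is the pair of lock helpers. Read together, they show that locking is skipped entirely when the application created the cache with VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, in which case the Vulkan spec makes the caller responsible for synchronization. A sketch with the non-matching lines filled in; the early returns are assumptions, but little else fits between these matches:

static void
radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
{
   /* The app promised external synchronization, so skip the mutex. */
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
      return;

   mtx_lock(&cache->mutex);
}

static void
radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
{
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
      return;

   mtx_unlock(&cache->mutex);
}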
76 radv_pipeline_cache_init(struct radv_pipeline_cache *cache, struct radv_device *device)
78 vk_object_base_init(&device->vk, &cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE);
80 cache->device = device;
81 mtx_init(&cache->mutex, mtx_plain);
82 cache->flags = 0;
84 cache->modified = false;
85 cache->kernel_count = 0;
86 cache->total_size = 0;
87 cache->table_size = 1024;
88 const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
89 cache->hash_table = malloc(byte_size);
92 * cache. Disable caching when we want to keep shader debug info, since
94 if (cache->hash_table == NULL || radv_is_cache_disabled(device))
95 cache->table_size = 0;
97 memset(cache->hash_table, 0, byte_size);
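
Initialization (lines 76-97) starts with a 1024-slot table; the size stays a power of two so later probing can use index & (table_size - 1) instead of a modulo. Note the graceful degradation at lines 94-95: if malloc fails, or caching is disabled to keep shader debug info (per the comment fragment at line 92), table_size is simply set to 0 and every later search and insert becomes a no-op. A self-contained toy of the same pattern; the names here are illustrative, not RADV's:

#include <stdlib.h>
#include <string.h>

struct toy_cache {
   void **table;
   size_t table_size; /* power of two, or 0 when the cache is disabled */
};

static void
toy_cache_init(struct toy_cache *c, size_t slots)
{
   c->table_size = slots; /* must be a power of two for mask-based probing */
   c->table = malloc(slots * sizeof(c->table[0]));
   if (c->table == NULL)
      c->table_size = 0; /* allocation failure is not fatal: caching is just off */
   else
      memset(c->table, 0, slots * sizeof(c->table[0]));
}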
101 radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
103 for (unsigned i = 0; i < cache->table_size; ++i)
104 if (cache->hash_table[i]) {
106 if (cache->hash_table[i]->shaders[j])
107 radv_shader_destroy(cache->device, cache->hash_table[i]->shaders[j]);
109 if (cache->hash_table[i]->slab)
110 radv_pipeline_slab_destroy(cache->device, cache->hash_table[i]->slab);
111 vk_free(&cache->alloc, cache->hash_table[i]);
113 mtx_destroy(&cache->mutex);
114 free(cache->hash_table);
116 vk_object_base_finish(&cache->base);
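
Teardown (lines 101-116) walks every occupied slot, releases each entry's per-stage shaders and its pipeline slab, then frees the entry itself before destroying the mutex and the table. A hedged reconstruction; the stage-loop bound at the elided line 105 is an assumption:

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
   for (unsigned i = 0; i < cache->table_size; ++i)
      if (cache->hash_table[i]) {
         for (int j = 0; j < MESA_VULKAN_SHADER_STAGES; ++j) /* assumed bound */
            if (cache->hash_table[i]->shaders[j])
               radv_shader_destroy(cache->device, cache->hash_table[i]->shaders[j]);
         if (cache->hash_table[i]->slab)
            radv_pipeline_slab_destroy(cache->device, cache->hash_table[i]->slab);
         vk_free(&cache->alloc, cache->hash_table[i]);
      }
   mtx_destroy(&cache->mutex);
   free(cache->hash_table);

   vk_object_base_finish(&cache->base);
}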
209 radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache, const unsigned char *sha1)
211 const uint32_t mask = cache->table_size - 1;
214 if (cache->table_size == 0)
217 for (uint32_t i = 0; i < cache->table_size; i++) {
219 struct cache_entry *entry = cache->hash_table[index];
233 radv_pipeline_cache_search(struct radv_pipeline_cache *cache, const unsigned char *sha1)
237 radv_pipeline_cache_lock(cache);
239 entry = radv_pipeline_cache_search_unlocked(cache, sha1);
241 radv_pipeline_cache_unlock(cache);
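
Lookup (lines 209-241) is linear probing over the power-of-two table: start from a slot derived from the SHA-1, walk forward with wraparound, and stop at either the first empty slot (a definitive miss, since insertion never leaves holes before a key) or an entry whose stored SHA-1 matches. radv_pipeline_cache_search is just the lock-taking wrapper around it. A sketch of the probe loop; deriving the start index from the first bytes of the hash and the memcmp are assumptions consistent with the matched lines:

static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = *(const uint32_t *)sha1; /* assumed start slot */

   if (cache->table_size == 0)
      return NULL; /* cache disabled, or its allocation failed */

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      struct cache_entry *entry = cache->hash_table[index];

      if (!entry)
         return NULL; /* first hole: the key cannot be further along */

      if (memcmp(entry->sha1, sha1, 20) == 0)
         return entry;
   }

   return NULL; /* table is kept at most half full, so this is unreachable */
}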
247 radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
249 const uint32_t mask = cache->table_size - 1;
253 assert(cache->kernel_count < cache->table_size / 2);
255 for (uint32_t i = 0; i < cache->table_size; i++) {
257 if (!cache->hash_table[index]) {
258 cache->hash_table[index] = entry;
263 cache->total_size += entry_size(entry);
264 cache->kernel_count++;
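
Insertion (lines 247-264) mirrors the lookup: probe from the same SHA-1-derived start slot and claim the first empty slot. The assert at line 253 encodes the invariant that the table is at most half full, so a free slot always exists and the loop must terminate. The running total_size update is what later makes the serialized-size query cheap. Filled-in sketch; the break after claiming a slot and the sha1 field access are assumptions:

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = *(const uint32_t *)entry->sha1; /* assumed, as in lookup */

   /* The grow path keeps the load factor below 1/2, so insertion cannot fail. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (!cache->hash_table[index]) {
         cache->hash_table[index] = entry;
         break;
      }
   }

   cache->total_size += entry_size(entry);
   cache->kernel_count++;
}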
268 radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
270 const uint32_t table_size = cache->table_size * 2;
271 const uint32_t old_table_size = cache->table_size;
272 const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
274 struct cache_entry **old_table = cache->hash_table;
278 return vk_error(cache, VK_ERROR_OUT_OF_HOST_MEMORY);
280 cache->hash_table = table;
281 cache->table_size = table_size;
282 cache->kernel_count = 0;
283 cache->total_size = 0;
285 memset(cache->hash_table, 0, byte_size);
291 radv_pipeline_cache_set_entry(cache, entry);
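
Growth (lines 268-291) doubles the table and rehashes by zeroing the counters, then re-running radv_pipeline_cache_set_entry on every live entry from the old table, which rebuilds total_size and kernel_count as a side effect. Reconstruction; the loop over old_table and the final free are assumptions implied by old_table_size and the re-insert at line 291:

static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   struct cache_entry **table;
   struct cache_entry **old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return vk_error(cache, VK_ERROR_OUT_OF_HOST_MEMORY);

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      struct cache_entry *entry = old_table[i];
      if (!entry)
         continue;

      radv_pipeline_cache_set_entry(cache, entry);
   }

   free(old_table);

   return VK_SUCCESS;
}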
300 radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
302 if (cache->kernel_count == cache->table_size / 2)
303 radv_pipeline_cache_grow(cache);
308 if (cache->kernel_count < cache->table_size / 2)
309 radv_pipeline_cache_set_entry(cache, entry);
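
add_entry (lines 300-309) is the public insert: grow when the table reaches half capacity, then insert only if there is still room. The second check matters because growth can fail with out-of-host-memory; in that case the entry is silently dropped rather than overfilling the table, costing performance but not correctness. Sketch:

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   if (cache->kernel_count == cache->table_size / 2)
      radv_pipeline_cache_grow(cache);

   /* Grow can fail; only insert while the half-full invariant still holds. */
   if (cache->kernel_count < cache->table_size / 2)
      radv_pipeline_cache_set_entry(cache, entry);
}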
314 struct radv_device *device, struct radv_pipeline_cache *cache, const unsigned char *sha1,
321 if (!cache) {
322 cache = device->mem_cache;
326 radv_pipeline_cache_lock(cache);
328 entry = radv_pipeline_cache_search_unlocked(cache, sha1);
333 /* Don't cache when we want debug info, since this isn't
334 * present in the cache.
337 radv_pipeline_cache_unlock(cache);
347 radv_pipeline_cache_unlock(cache);
352 vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
355 radv_pipeline_cache_unlock(cache);
364 cache != device->mem_cache)
365 radv_pipeline_cache_add_entry(cache, new_entry);
392 /* For the GS copy shader, RADV uses the compute shader slot to avoid a new cache entry. */
408 radv_pipeline_cache_unlock(cache);
430 if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache)
431 vk_free(&cache->alloc, entry);
440 radv_pipeline_cache_unlock(cache);
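
The function starting at line 314 (its name and most of its body fall between the matches) is the shader lookup path: a NULL VkPipelineCache falls back to the device-wide device->mem_cache (lines 321-322), the in-memory table is searched under the lock, and on a miss an entry may be pulled from the on-disk cache, copied into a vk_alloc'd cache_entry (line 352), and published into the in-memory table (lines 364-365), except that under RADV_DEBUG_NO_MEMORY_CACHE an entry found for the fallback cache is freed instead of being kept (lines 430-431). The comment at line 392 adds that the GS copy shader reuses the compute shader slot to avoid a separate entry. Note the discipline that every exit path, success or failure, re-runs radv_pipeline_cache_unlock. A hypothetical skeleton; only identifiers that appear in the matches are real:

bool
lookup_shaders(struct radv_device *device, struct radv_pipeline_cache *cache,
               const unsigned char *sha1)
{
   if (!cache)
      cache = device->mem_cache; /* per-device fallback cache */

   radv_pipeline_cache_lock(cache);

   struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
   if (!entry) {
      /* ... consult the disk cache, vk_alloc a new_entry, deserialize it ... */
      /* ... publish via radv_pipeline_cache_add_entry unless this is the
       *     fallback cache with RADV_DEBUG_NO_MEMORY_CACHE set ... */
   }

   /* ... materialize radv_shader objects from the entry ... */

   radv_pipeline_cache_unlock(cache);
   return entry != NULL;
}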
445 radv_pipeline_cache_insert_shaders(struct radv_device *device, struct radv_pipeline_cache *cache,
451 if (!cache)
452 cache = device->mem_cache;
454 radv_pipeline_cache_lock(cache);
455 struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
461 radv_shader_destroy(cache->device, pipeline->shaders[i]);
467 radv_pipeline_slab_destroy(cache->device, pipeline->slab);
472 radv_pipeline_cache_unlock(cache);
476 /* Don't cache when we want debug info, since this isn't
477 * present in the cache.
480 radv_pipeline_cache_unlock(cache);
491 entry = vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
493 radv_pipeline_cache_unlock(cache);
523 /* Always add cache items to disk. This will allow collection of
525 * implements its own pipeline cache.
527 * Make sure to exclude meta shaders because they are stored in a different cache file.
529 if (device->physical_device->disk_cache && cache != &device->meta_state.cache) {
537 if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache) {
538 vk_free2(&cache->alloc, NULL, entry);
539 radv_pipeline_cache_unlock(cache);
543 /* We delay setting the shader so we have reproducible disk cache
557 radv_pipeline_cache_add_entry(cache, entry);
559 cache->modified = true;
560 radv_pipeline_cache_unlock(cache);
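
insert_shaders (lines 445-560) is the write path, and the matches expose its policies. If another thread already inserted an entry for the same SHA-1 (lines 455-467), the caller's freshly compiled shaders and slab are destroyed in favor of the cached ones, keeping a single canonical copy. Nothing is cached when shader debug info is wanted (lines 476-480). New entries always go to the on-disk cache, evidently (line 524 is not a match) so compiled shaders can be collected externally even if the app implements its own pipeline cache, with meta shaders excluded because they live in a separate cache file (lines 523-529); and under RADV_DEBUG_NO_MEMORY_CACHE the entry is not kept in memory at all (lines 537-539). The comment at line 543 explains why the live radv_shader pointers are patched into the entry only after the disk-cache write: the serialized bytes must not depend on runtime pointer values, or disk-cache items would not be reproducible. A sketch of that final step; the stage loop is an assumption:

/* The entry was serialized to disk with shaders[] still NULL, so the
 * on-disk blob is reproducible. Only now attach the live shader objects. */
for (int i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
   if (!pipeline->shaders[i])
      continue;

   entry->shaders[i] = pipeline->shaders[i];
}

radv_pipeline_cache_add_entry(cache, entry);

cache->modified = true;
radv_pipeline_cache_unlock(cache);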
565 radv_pipeline_cache_load(struct radv_pipeline_cache *cache, const void *data, size_t size)
567 struct radv_device *device = cache->device;
594 dest_entry = vk_alloc(&cache->alloc, size_of_entry, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
600 radv_pipeline_cache_add_entry(cache, dest_entry);
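
radv_pipeline_cache_load (lines 565-600) deserializes VkPipelineCacheCreateInfo::pInitialData: after validating the standard Vulkan pipeline-cache header (those lines fall between the matches), it walks the packed entries, vk_allocs a copy of each, and feeds the copies to radv_pipeline_cache_add_entry. A hedged sketch of the entry loop; the bounds checks and the NULLing of deserialized shader pointers are assumptions that mirror the serialization path at the end of this listing:

/* After header validation; header_size comes from the validated header. */
const char *p = (const char *)data + header_size;
const char *end = (const char *)data + size;

while (end - p >= (ptrdiff_t)sizeof(struct cache_entry)) {
   const struct cache_entry *entry = (const struct cache_entry *)p;
   const size_t size_of_entry = entry_size(entry);
   if ((size_t)(end - p) < size_of_entry)
      break; /* truncated blob: stop without failing the whole load */

   struct cache_entry *dest_entry =
      vk_alloc(&cache->alloc, size_of_entry, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
   if (dest_entry) {
      memcpy(dest_entry, entry, size_of_entry);
      for (int i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i)
         dest_entry->shaders[i] = NULL; /* live pointers are meaningless across runs */
      radv_pipeline_cache_add_entry(cache, dest_entry);
   }
   p += size_of_entry;
}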
613 struct radv_pipeline_cache *cache;
617 cache = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*cache), 8,
619 if (cache == NULL)
623 cache->alloc = *pAllocator;
625 cache->alloc = device->vk.alloc;
627 radv_pipeline_cache_init(cache, device);
628 cache->flags = pCreateInfo->flags;
631 radv_pipeline_cache_load(cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
634 *pPipelineCache = radv_pipeline_cache_to_handle(cache);
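
radv_CreatePipelineCache (lines 613-634) follows the standard RADV object pattern: allocate with vk_alloc2, prefer the caller's allocator over the device's (lines 623-625), initialize, record the create flags, and load any initial data. Reconstruction with the elided control flow filled in as assumptions:

VKAPI_ATTR VkResult VKAPI_CALL
radv_CreatePipelineCache(VkDevice _device, const VkPipelineCacheCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkPipelineCache *pPipelineCache)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_pipeline_cache *cache;

   cache = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      cache->alloc = *pAllocator;
   else
      cache->alloc = device->vk.alloc;

   radv_pipeline_cache_init(cache, device);
   cache->flags = pCreateInfo->flags;

   if (pCreateInfo->initialDataSize > 0)
      radv_pipeline_cache_load(cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize);

   *pPipelineCache = radv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}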
644 RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
646 if (!cache)
649 radv_pipeline_cache_finish(cache);
650 vk_free2(&device->vk.alloc, pAllocator, cache);
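
Destruction (lines 644-650) is the usual NULL-tolerant Vulkan destroy: finish the object, then free it with the same allocator pairing used at creation. Sketch:

VKAPI_ATTR void VKAPI_CALL
radv_DestroyPipelineCache(VkDevice _device, VkPipelineCache _cache,
                          const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

   if (!cache)
      return; /* destroying VK_NULL_HANDLE is a no-op, per the Vulkan spec */

   radv_pipeline_cache_finish(cache);
   vk_free2(&device->vk.alloc, pAllocator, cache);
}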
657 RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
661 radv_pipeline_cache_lock(cache);
663 const size_t size = sizeof(*header) + cache->total_size;
665 radv_pipeline_cache_unlock(cache);
670 radv_pipeline_cache_unlock(cache);
684 for (uint32_t i = 0; i < cache->table_size; i++) {
685 if (!cache->hash_table[i])
687 entry = cache->hash_table[i];
702 radv_pipeline_cache_unlock(cache);
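
radv_GetPipelineCacheData (lines 657-702) implements the spec's two-call size-query idiom under the cache lock: with pData == NULL it reports sizeof(*header) + cache->total_size and returns (lines 663-665); otherwise it writes the Vulkan header followed by each occupied entry. The running total_size maintained by set_entry is what makes the size query O(1). A hedged sketch of the copy loop; zeroing the shader pointers in the output is an assumption that matches the load path above:

const size_t size = sizeof(*header) + cache->total_size;
if (pData == NULL) {
   radv_pipeline_cache_unlock(cache);
   *pDataSize = size; /* first call: size query only */
   return VK_SUCCESS;
}

/* ... fill the Vulkan pipeline-cache header, advance p past it ... */

for (uint32_t i = 0; i < cache->table_size; i++) {
   if (!cache->hash_table[i])
      continue;
   struct cache_entry *entry = cache->hash_table[i];
   const uint32_t size_of_entry = entry_size(entry);
   if ((char *)end < (char *)p + size_of_entry) {
      result = VK_INCOMPLETE; /* caller's buffer too small: truncate */
      break;
   }

   memcpy(p, entry, size_of_entry);
   for (int j = 0; j < MESA_VULKAN_SHADER_STAGES; ++j)
      ((struct cache_entry *)p)->shaders[j] = NULL; /* never leak live pointers */
   p = (char *)p + size_of_entry;
}
*pDataSize = (char *)p - (char *)pData;

radv_pipeline_cache_unlock(cache);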