Lines Matching defs:entry

120 entry_size(struct cache_entry *entry)
122 size_t ret = sizeof(*entry);
124 if (entry->binary_sizes[i])
125 ret += entry->binary_sizes[i];
126 ret += sizeof(struct radv_pipeline_shader_stack_size) * entry->num_stack_sizes;
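
These matches are from RADV's pipeline cache, radv_pipeline_cache.c in Mesa. Lines 120-126 are entry_size(), which computes an entry's total footprint: the fixed header, one serialized binary per shader stage that has one, and the trailing stack-size array. A minimal sketch of how the matched lines fit together; the MESA_SHADER_STAGES loop bound and the final align() (to stay consistent with the size_without_align padding at lines 519-521) are assumptions:

static size_t
entry_size(struct cache_entry *entry)
{
   size_t ret = sizeof(*entry);

   /* One serialized binary per stage that was cached. */
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (entry->binary_sizes[i])
         ret += entry->binary_sizes[i];

   /* Ray-tracing stack sizes ride at the end of the entry. */
   ret += sizeof(struct radv_pipeline_shader_stack_size) * entry->num_stack_sizes;

   /* Assumed: round up so entries can be laid out back to back. */
   return align(ret, alignof(struct cache_entry));
}
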
219 struct cache_entry *entry = cache->hash_table[index];
221 if (!entry)
224 if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
225 return entry;
235 struct cache_entry *entry;
239 entry = radv_pipeline_cache_search_unlocked(cache, sha1);
243 return entry;
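
Lines 219-243 are the lookup path: the table is open-addressed and probed linearly from a start slot derived from the first 32 bits of the SHA-1 hash, with a mutex-taking wrapper on top. A sketch, assuming a power-of-two table_size (which the mask arithmetic requires) and the lock/unlock helpers defined elsewhere in the file:

static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   const uint32_t mask = cache->table_size - 1; /* table_size is a power of two */
   const uint32_t start = *(uint32_t *)sha1;    /* first SHA-1 dword picks the start slot */

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask; /* linear probing */
      struct cache_entry *entry = cache->hash_table[index];

      if (!entry)
         return NULL; /* an empty slot terminates the probe chain */

      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0)
         return entry;
   }

   return NULL; /* table exhausted */
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   struct cache_entry *entry;

   radv_pipeline_cache_lock(cache); /* assumed wrapper around the cache mutex */
   entry = radv_pipeline_cache_search_unlocked(cache, sha1);
   radv_pipeline_cache_unlock(cache);

   return entry;
}
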
247 radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
250 const uint32_t start = entry->sha1_dw[0];
258 cache->hash_table[index] = entry;
263 cache->total_size += entry_size(entry);
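
radv_pipeline_cache_set_entry() (lines 247-263) is the insert half of the same scheme: probe from entry->sha1_dw[0], the same dword the lookup starts from, and drop the pointer into the first empty slot. A sketch; the kernel_count bookkeeping is an assumption inferred from the grow/add logic below:

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = entry->sha1_dw[0]; /* same start slot the lookup uses */

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (!cache->hash_table[index]) {
         cache->hash_table[index] = entry;
         break;
      }
   }

   cache->total_size += entry_size(entry);
   cache->kernel_count++; /* assumed occupancy counter, see the grow path */
}
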
287 struct cache_entry *entry = old_table[i];
288 if (!entry)
291 radv_pipeline_cache_set_entry(cache, entry);
300 radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
309 radv_pipeline_cache_set_entry(cache, entry);
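
Lines 287-291 sit in the grow path, which re-inserts every live entry through set_entry() because slot positions depend on the table mask, and lines 300-309 are radv_pipeline_cache_add_entry(), which keeps the load factor at or below one half before delegating to set_entry(). A sketch of both, assuming table doubling and a half-full threshold:

static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
   const uint32_t old_table_size = cache->table_size;
   const uint32_t table_size = old_table_size * 2; /* doubling assumed */
   struct cache_entry **old_table = cache->hash_table;
   struct cache_entry **table = calloc(table_size, sizeof(*table));

   if (table == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0; /* set_entry() re-accumulates both counters */

   for (uint32_t i = 0; i < old_table_size; i++) {
      struct cache_entry *entry = old_table[i];
      if (!entry)
         continue;

      radv_pipeline_cache_set_entry(cache, entry);
   }

   free(old_table);
   return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   if (cache->kernel_count == cache->table_size / 2)
      radv_pipeline_cache_grow(cache);

   /* Growing can fail; only insert if there is room. */
   if (cache->kernel_count < cache->table_size / 2)
      radv_pipeline_cache_set_entry(cache, entry);
}
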
318 struct cache_entry *entry;
328 entry = radv_pipeline_cache_search_unlocked(cache, sha1);
330 if (!entry) {
344 entry = (struct cache_entry *)disk_cache_get(device->physical_device->disk_cache, disk_sha1, NULL);
346 if (!entry) {
350 size_t size = entry_size(entry);
354 free(entry);
359 memcpy(new_entry, entry, entry_size(entry));
360 free(entry);
361 entry = new_entry;
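
Lines 318-361 open the cache-to-pipeline path: look up the SHA-1 in memory first and, on a miss, fall back to Mesa's on-disk shader cache. disk_cache_get() hands back a malloc()ed blob, so the code clones it into a vk_alloc()ed entry the cache can own. A sketch of the fallback; the disk_cache_compute_key() re-keying mirrors the disk_cache_put() call at line 533, and adding the clone to the in-memory table is assumed:

entry = radv_pipeline_cache_search_unlocked(cache, sha1);

if (!entry) {
   uint8_t disk_sha1[20];

   /* Re-key for the disk cache, mirroring the write path. */
   disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20, disk_sha1);

   entry = (struct cache_entry *)
      disk_cache_get(device->physical_device->disk_cache, disk_sha1, NULL);
   if (!entry)
      return false; /* true miss: the caller has to compile */

   /* Clone the malloc()ed blob into a cache-owned allocation. */
   size_t size = entry_size(entry);
   struct cache_entry *new_entry =
      vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
   if (!new_entry) {
      free(entry);
      return false;
   }

   memcpy(new_entry, entry, entry_size(entry));
   free(entry);
   entry = new_entry;

   radv_pipeline_cache_add_entry(cache, entry); /* assumed, so later hits stay in memory */
}
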
372 char *p = entry->code;
374 if (!entry->shaders[i] && entry->binary_sizes[i]) {
375 struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
376 memcpy(binary, p, entry->binary_sizes[i]);
377 p += entry->binary_sizes[i];
379 entry->shaders[i] = radv_shader_create(device, binary, false, true, NULL);
383 } else if (entry->binary_sizes[i]) {
384 p += entry->binary_sizes[i];
388 memcpy(pipeline->shaders, entry->shaders, sizeof(entry->shaders));
392 /* For the GS copy shader, RADV uses the compute shader slot to avoid a new cache entry. */
412 entry->slab = pipeline->slab;
414 pipeline->slab = entry->slab;
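
Lines 372-384 walk entry->code, where the serialized binaries sit back to back in stage order, and lazily rebuild a radv_shader for each stage that was cached but not yet materialized; already-materialized stages just skip their bytes. Lines 388-414 then hand the shaders to the pipeline, repurpose the compute slot for the GS copy shader (line 392), and share the upload slab between the entry and the pipeline. A sketch; which slab branch runs first is inferred from lines 412 and 414:

char *p = entry->code; /* serialized binaries, back to back in stage order */

for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
   if (!entry->shaders[i] && entry->binary_sizes[i]) {
      /* First hit on this entry: rebuild the shader from its binary. */
      struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
      memcpy(binary, p, entry->binary_sizes[i]);
      p += entry->binary_sizes[i];

      entry->shaders[i] = radv_shader_create(device, binary, false, true, NULL);
      free(binary);
   } else if (entry->binary_sizes[i]) {
      /* Already materialized by an earlier hit: skip the bytes. */
      p += entry->binary_sizes[i];
   }
}

memcpy(pipeline->shaders, entry->shaders, sizeof(entry->shaders));

if (!entry->slab) {
   entry->slab = pipeline->slab;  /* first hit: the entry adopts the new slab */
} else {
   pipeline->slab = entry->slab;  /* later hits: reuse the slab already uploaded */
}
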
419 *num_stack_sizes = entry->num_stack_sizes;
420 if (entry->num_stack_sizes) {
421 *stack_sizes = malloc(entry->num_stack_sizes * sizeof(**stack_sizes));
422 memcpy(*stack_sizes, p, entry->num_stack_sizes * sizeof(**stack_sizes));
425 assert(!entry->num_stack_sizes);
428 p += entry->num_stack_sizes * sizeof(**stack_sizes);
431 vk_free(&cache->alloc, entry);
434 if (entry->shaders[i])
435 p_atomic_inc(&entry->shaders[i]->ref_count);
436 p_atomic_inc(&entry->slab->ref_count);
439 assert((uintptr_t)p <= (uintptr_t)entry + entry_size(entry));
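
Lines 419-439 finish the hit path: the ray-tracing stack sizes are copied out (p now points just past the binaries), and then either the entry is freed (the vk_free() at line 431, apparently when in-memory caching is off) or every object the caller now shares gets its reference count bumped. The closing assert checks the walk of entry->code never ran past the entry. A sketch; memory_cache_disabled is a hypothetical stand-in for the real condition:

if (num_stack_sizes) {
   *num_stack_sizes = entry->num_stack_sizes;
   if (entry->num_stack_sizes) {
      *stack_sizes = malloc(entry->num_stack_sizes * sizeof(**stack_sizes));
      memcpy(*stack_sizes, p, entry->num_stack_sizes * sizeof(**stack_sizes));
   }
} else {
   assert(!entry->num_stack_sizes);
}

p += entry->num_stack_sizes * sizeof(**stack_sizes);

if (memory_cache_disabled) { /* hypothetical name for the real check */
   /* The entry was a throwaway copy: the pipeline owns everything now. */
   vk_free(&cache->alloc, entry);
} else {
   /* The pipeline shares the shaders and slab with the cache entry. */
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (entry->shaders[i])
         p_atomic_inc(&entry->shaders[i]->ref_count);
   p_atomic_inc(&entry->slab->ref_count);
}

/* The pointer walk must stay inside the entry. */
assert((uintptr_t)p <= (uintptr_t)entry + entry_size(entry));
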
455 struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
456 if (entry) {
458 if (!entry->shaders[i])
463 pipeline->shaders[i] = entry->shaders[i];
469 pipeline->slab = entry->slab;
484 size_t size = sizeof(*entry) + sizeof(*stack_sizes) * num_stack_sizes;
491 entry = vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
492 if (!entry) {
497 memset(entry, 0, sizeof(*entry));
498 memcpy(entry->sha1, sha1, 20);
500 char *p = entry->code;
506 entry->binary_sizes[i] = binaries[i]->total_size;
516 entry->num_stack_sizes = num_stack_sizes;
519 assert(p == (char *)entry + size_without_align);
520 assert(sizeof(*entry) + (p - entry->code) == size_without_align);
521 memset((char *)entry + size_without_align, 0, size - size_without_align);
533 disk_cache_put(device->physical_device->disk_cache, disk_sha1, entry, entry_size(entry), NULL);
538 vk_free2(&cache->alloc, NULL, entry);
550 entry->shaders[i] = pipeline->shaders[i];
554 entry->slab = pipeline->slab;
557 radv_pipeline_cache_add_entry(cache, entry);
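
Lines 455-557 are the write path, radv_pipeline_cache_insert_shaders(). On lines 455-469 a racing thread may already have inserted an entry for this sha1, in which case the pipeline adopts the cached shaders and slab instead of its own. Otherwise a fresh entry is sized (line 484), filled with the serialized binaries and stack sizes, written through to the disk cache, and only then wired up with shader pointers and published, so no host pointers ever reach the disk blob. A condensed sketch of the miss path under those assumptions:

size_t size = sizeof(*entry) + sizeof(*stack_sizes) * num_stack_sizes;
for (int i = 0; i < MESA_SHADER_STAGES; ++i)
   if (pipeline->shaders[i])
      size += binaries[i]->total_size;
const size_t size_without_align = size;
size = align(size_without_align, alignof(struct cache_entry)); /* assumed */

entry = vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
if (!entry)
   return; /* caching is best effort */

memset(entry, 0, sizeof(*entry));
memcpy(entry->sha1, sha1, 20);

char *p = entry->code;
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
   if (!pipeline->shaders[i])
      continue;
   entry->binary_sizes[i] = binaries[i]->total_size;
   memcpy(p, binaries[i], binaries[i]->total_size);
   p += binaries[i]->total_size;
}

if (num_stack_sizes) {
   memcpy(p, stack_sizes, sizeof(*stack_sizes) * num_stack_sizes);
   p += sizeof(*stack_sizes) * num_stack_sizes;
}
entry->num_stack_sizes = num_stack_sizes;

/* Zero the alignment tail so serialized blobs are deterministic. */
memset((char *)entry + size_without_align, 0, size - size_without_align);

/* Write through to disk while entry->shaders[] is still all NULL. */
disk_cache_put(device->physical_device->disk_cache, disk_sha1, entry, entry_size(entry), NULL);

/* Now share ownership with the in-memory cache and publish. */
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
   if (!pipeline->shaders[i])
      continue;
   entry->shaders[i] = pipeline->shaders[i];
   p_atomic_inc(&pipeline->shaders[i]->ref_count);
}
entry->slab = pipeline->slab;
p_atomic_inc(&pipeline->slab->ref_count);

radv_pipeline_cache_add_entry(cache, entry);
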
588 struct cache_entry *entry = (struct cache_entry *)p;
590 size_t size_of_entry = entry_size(entry);
596 memcpy(dest_entry, entry, size_of_entry);
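
Lines 588-596 sit in the deserializer that consumes VkPipelineCacheCreateInfo::pInitialData: each entry's self-reported size is validated against the bytes remaining in the blob, then the entry is cloned into a cache-owned allocation. A sketch, with header validation elided; clearing the pointer fields is assumed, since pointers in an application-supplied blob are stale by definition:

char *end = (char *)data + size;
char *p = (char *)data + header.header_size; /* skip the Vulkan cache header */

while (end - p >= (ptrdiff_t)sizeof(struct cache_entry)) {
   struct cache_entry *entry = (struct cache_entry *)p;
   size_t size_of_entry = entry_size(entry);
   if (end - p < (ptrdiff_t)size_of_entry)
      break; /* truncated blob: stop parsing */

   struct cache_entry *dest_entry =
      vk_alloc(&cache->alloc, size_of_entry, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
   if (dest_entry) {
      memcpy(dest_entry, entry, size_of_entry);
      for (int i = 0; i < MESA_SHADER_STAGES; ++i)
         dest_entry->shaders[i] = NULL; /* rebuilt lazily on the next hit */
      dest_entry->slab = NULL;
      radv_pipeline_cache_add_entry(cache, dest_entry);
   }
   p += size_of_entry;
}
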
683 struct cache_entry *entry;
687 entry = cache->hash_table[i];
688 const uint32_t size_of_entry = entry_size(entry);
694 memcpy(p, entry, size_of_entry);
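
Lines 683-694 are the inverse, inside vkGetPipelineCacheData(): every live entry is copied verbatim into the application's buffer, so the shader and slab pointers in each copy have to be scrubbed before the blob leaves the driver. A sketch, assuming the standard VK_INCOMPLETE contract when the buffer is too small and that p/end already point past the written header:

for (uint32_t i = 0; i < cache->table_size; i++) {
   struct cache_entry *entry = cache->hash_table[i];
   if (!entry)
      continue;

   const uint32_t size_of_entry = entry_size(entry);
   if (end < p + size_of_entry) {
      result = VK_INCOMPLETE; /* report how much fit; the app retries bigger */
      break;
   }

   memcpy(p, entry, size_of_entry);

   /* Scrub host pointers out of the serialized copy. */
   for (int j = 0; j < MESA_SHADER_STAGES; ++j)
      ((struct cache_entry *)p)->shaders[j] = NULL;
   ((struct cache_entry *)p)->slab = NULL;

   p += size_of_entry;
}
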
710 struct cache_entry *entry = src->hash_table[i];
711 if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
714 radv_pipeline_cache_add_entry(dst, entry);
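
Finally, lines 710-714 implement vkMergePipelineCaches(): entries are moved from each source cache into the destination unless the destination already has that sha1. A sketch of the inner loop (the outer loop over pSrcCaches is elided); clearing the source slot is assumed, and is what keeps a moved entry from being freed twice when both caches are later destroyed:

/* for each source cache 'src' in pSrcCaches ... */
for (uint32_t i = 0; i < src->table_size; i++) {
   struct cache_entry *entry = src->hash_table[i];
   if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
      continue; /* empty slot, or dst already caches this pipeline */

   radv_pipeline_cache_add_entry(dst, entry);
   src->hash_table[i] = NULL; /* assumed: move, don't share, to avoid a double free */
}
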