/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_pipeline_cache.h"

#include "vk_alloc.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_log.h"
#include "vk_physical_device.h"

#include "compiler/nir/nir_serialize.h"

#include "util/blob.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/hash_table.h"
#include "util/set.h"

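/* A raw_data_object wraps an opaque blob of bytes.  It is used both for
 * objects whose real vk_pipeline_cache_object_ops are not yet known (data
 * imported from an application-provided cache or from the disk cache) and
 * for clients such as vk_pipeline_cache_add_nir() that only ever deal in
 * serialized bytes.
 */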
struct raw_data_object {
   struct vk_pipeline_cache_object base;

   const void *data;
   size_t data_size;
};

static struct raw_data_object *
raw_data_object_create(struct vk_device *device,
                       const void *key_data, size_t key_size,
                       const void *data, size_t data_size);

static bool
raw_data_object_serialize(struct vk_pipeline_cache_object *object,
                          struct blob *blob)
{
   struct raw_data_object *data_obj =
      container_of(object, struct raw_data_object, base);

   blob_write_bytes(blob, data_obj->data, data_obj->data_size);

   return true;
}

static struct vk_pipeline_cache_object *
raw_data_object_deserialize(struct vk_device *device,
                            const void *key_data,
                            size_t key_size,
                            struct blob_reader *blob)
{
   /* We consume the entire blob_reader.  Each call to ops->deserialize()
    * happens with a brand-new blob reader for error checking anyway, so we
    * can assume the object consumes the entire reader and we don't need to
    * serialize the data size separately.
    */
   assert(blob->current < blob->end);
   size_t data_size = blob->end - blob->current;
   const void *data = blob_read_bytes(blob, data_size);

   struct raw_data_object *data_obj =
      raw_data_object_create(device, key_data, key_size, data, data_size);

   return data_obj ? &data_obj->base : NULL;
}

static void
raw_data_object_destroy(struct vk_pipeline_cache_object *object)
{
   struct raw_data_object *data_obj =
      container_of(object, struct raw_data_object, base);

   vk_free(&data_obj->base.device->alloc, data_obj);
}

static const struct vk_pipeline_cache_object_ops raw_data_object_ops = {
   .serialize = raw_data_object_serialize,
   .deserialize = raw_data_object_deserialize,
   .destroy = raw_data_object_destroy,
};

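/* Allocates the object, its key, and its data as a single allocation and
 * copies both the key and the data in, so the caller's buffers do not need
 * to outlive the object.
 */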
static struct raw_data_object *
raw_data_object_create(struct vk_device *device,
                       const void *key_data, size_t key_size,
                       const void *data, size_t data_size)
{
   VK_MULTIALLOC(ma);
   VK_MULTIALLOC_DECL(&ma, struct raw_data_object, data_obj, 1);
   VK_MULTIALLOC_DECL_SIZE(&ma, char, obj_key_data, key_size);
   VK_MULTIALLOC_DECL_SIZE(&ma, char, obj_data, data_size);

   if (!vk_multialloc_alloc(&ma, &device->alloc,
                            VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   vk_pipeline_cache_object_init(device, &data_obj->base,
                                 &raw_data_object_ops,
                                 obj_key_data, key_size);
   data_obj->data = obj_data;
   data_obj->data_size = data_size;

   memcpy(obj_key_data, key_data, key_size);
   memcpy(obj_data, data, data_size);

   return data_obj;
}

static bool
object_keys_equal(const void *void_a, const void *void_b)
{
   const struct vk_pipeline_cache_object *a = void_a, *b = void_b;
   if (a->key_size != b->key_size)
      return false;

   return memcmp(a->key_data, b->key_data, a->key_size) == 0;
}

static uint32_t
object_key_hash(const void *void_object)
{
   const struct vk_pipeline_cache_object *object = void_object;
   return _mesa_hash_data(object->key_data, object->key_size);
}

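/* If the client created the cache with
 * VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, it has promised to
 * synchronize access itself and all internal locking is skipped.
 */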
static void
vk_pipeline_cache_lock(struct vk_pipeline_cache *cache)
{
   if (!(cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT))
      simple_mtx_lock(&cache->lock);
}

static void
vk_pipeline_cache_unlock(struct vk_pipeline_cache *cache)
{
   if (!(cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT))
      simple_mtx_unlock(&cache->lock);
}

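/* Removes the object from the cache if it is still the entry stored under
 * its hash.  Consumes the caller's reference in addition to dropping the
 * reference owned by the cache itself.
 */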
static void
vk_pipeline_cache_remove_object(struct vk_pipeline_cache *cache,
                                uint32_t hash,
                                struct vk_pipeline_cache_object *object)
{
   vk_pipeline_cache_lock(cache);
   struct set_entry *entry =
      _mesa_set_search_pre_hashed(cache->object_cache, hash, object);
   if (entry && entry->key == (const void *)object) {
      /* Drop the reference owned by the cache */
      vk_pipeline_cache_object_unref(object);

      _mesa_set_remove(cache->object_cache, entry);
   }
   vk_pipeline_cache_unlock(cache);

   /* Drop our reference */
   vk_pipeline_cache_object_unref(object);
}

/* Consumes references to both search and replace and produces a reference */
static struct vk_pipeline_cache_object *
vk_pipeline_cache_replace_object(struct vk_pipeline_cache *cache,
                                 uint32_t hash,
                                 struct vk_pipeline_cache_object *search,
                                 struct vk_pipeline_cache_object *replace)
{
   assert(object_keys_equal(search, replace));

   vk_pipeline_cache_lock(cache);
   struct set_entry *entry =
      _mesa_set_search_pre_hashed(cache->object_cache, hash, search);

   struct vk_pipeline_cache_object *found = NULL;
   if (entry) {
      if (entry->key == (const void *)search) {
         /* Drop the reference owned by the cache */
         vk_pipeline_cache_object_unref(search);

         entry->key = vk_pipeline_cache_object_ref(replace);
      } else {
         found = vk_pipeline_cache_object_ref((void *)entry->key);
      }
   } else {
      /* The object must have been purged; re-add it to the cache */
      vk_pipeline_cache_object_ref(replace);
      _mesa_set_add_pre_hashed(cache->object_cache, hash, replace);
   }
   vk_pipeline_cache_unlock(cache);

   vk_pipeline_cache_object_unref(search);

   if (found) {
      vk_pipeline_cache_object_unref(replace);
      return found;
   } else {
      return replace;
   }
}

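/* Serializes the object into the blob and returns its size in bytes through
 * data_size.  The size is also cached atomically on the object so that a
 * later size-only GetPipelineCacheData() query can skip serializing again.
 */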
static bool
vk_pipeline_cache_object_serialize(struct vk_pipeline_cache *cache,
                                   struct vk_pipeline_cache_object *object,
                                   struct blob *blob, uint32_t *data_size)
{
   if (object->ops->serialize == NULL)
      return false;

   assert(blob->size == align64(blob->size, VK_PIPELINE_CACHE_BLOB_ALIGN));
   size_t start = blob->size;

   /* Special case for when we're writing to a NULL blob (just to get the
    * size) and we already know the data size of the allocation.  This should
    * make the first GetPipelineCacheData() call to get the data size faster
    * in the common case where a bunch of our objects were loaded from a
    * previous cache or where we've already serialized the cache once.
    */
   if (blob->data == NULL && blob->fixed_allocation) {
      *data_size = p_atomic_read(&object->data_size);
      if (*data_size > 0) {
         blob_write_bytes(blob, NULL, *data_size);
         return true;
      }
   }

   if (!object->ops->serialize(object, blob)) {
      vk_logw(VK_LOG_OBJS(cache),
              "Failed to serialize pipeline cache object");
      return false;
   }

   size_t size = blob->size - start;
   if (size > UINT32_MAX) {
      vk_logw(VK_LOG_OBJS(cache),
              "Skipping giant (4 GiB or larger) object");
      return false;
   }

   if (blob->out_of_memory) {
      vk_logw(VK_LOG_OBJS(cache),
              "Insufficient memory for pipeline cache data");
      return false;
   }

   *data_size = (uint32_t)size;
   p_atomic_set(&object->data_size, *data_size);

   return true;
}

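/* Deserializes raw bytes into an object via ops->deserialize().  When ops is
 * NULL, the bytes are wrapped in a raw_data_object instead so they can be
 * properly deserialized later, once the real ops are known.
 */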
static struct vk_pipeline_cache_object *
vk_pipeline_cache_object_deserialize(struct vk_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *data, size_t data_size,
                                     const struct vk_pipeline_cache_object_ops *ops)
{
   if (ops == NULL)
      ops = &raw_data_object_ops;

   if (unlikely(ops->deserialize == NULL)) {
      vk_logw(VK_LOG_OBJS(cache),
              "Pipeline cache object cannot be deserialized");
      return NULL;
   }

   struct blob_reader reader;
   blob_reader_init(&reader, data, data_size);

   struct vk_pipeline_cache_object *object =
      ops->deserialize(cache->base.device, key_data, key_size, &reader);

   if (object == NULL) {
      vk_logw(VK_LOG_OBJS(cache),
              "Deserializing pipeline cache object failed");
      return NULL;
   }

   assert(reader.current == reader.end && !reader.overrun);
   assert(object->device == cache->base.device);
   assert(object->ops == ops);
   assert(object->ref_cnt == 1);
   assert(object->key_size == key_size);
   assert(memcmp(object->key_data, key_data, key_size) == 0);

   return object;
}

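/* Looks up an object by key, first in the in-memory hash set and then, on a
 * miss, in the on-disk shader cache.  On success, a new reference is
 * returned and the caller must eventually unref it.  A sketch of typical
 * driver usage (my_shader_ops, key, and my_compile_and_add are hypothetical):
 *
 *    bool cache_hit;
 *    struct vk_pipeline_cache_object *obj =
 *       vk_pipeline_cache_lookup_object(cache, &key, sizeof(key),
 *                                       &my_shader_ops, &cache_hit);
 *    if (obj == NULL)
 *       obj = my_compile_and_add(cache, &key);
 */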
struct vk_pipeline_cache_object *
vk_pipeline_cache_lookup_object(struct vk_pipeline_cache *cache,
                                const void *key_data, size_t key_size,
                                const struct vk_pipeline_cache_object_ops *ops,
                                bool *cache_hit)
{
   assert(key_size <= UINT32_MAX);
   assert(ops != NULL);

   if (cache_hit != NULL)
      *cache_hit = false;

   struct vk_pipeline_cache_object key = {
      .key_data = key_data,
      .key_size = key_size,
   };
   uint32_t hash = object_key_hash(&key);

   struct vk_pipeline_cache_object *object = NULL;

   if (cache != NULL && cache->object_cache != NULL) {
      vk_pipeline_cache_lock(cache);
      struct set_entry *entry =
         _mesa_set_search_pre_hashed(cache->object_cache, hash, &key);
      if (entry) {
         object = vk_pipeline_cache_object_ref((void *)entry->key);
         if (cache_hit != NULL)
            *cache_hit = true;
      }
      vk_pipeline_cache_unlock(cache);
   }

   if (object == NULL) {
#ifdef ENABLE_SHADER_CACHE
      struct disk_cache *disk_cache = cache->base.device->physical->disk_cache;
      if (disk_cache != NULL && cache->object_cache != NULL) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         size_t data_size;
         uint8_t *data = disk_cache_get(disk_cache, cache_key, &data_size);
         if (data) {
            object = vk_pipeline_cache_object_deserialize(cache,
                                                          key_data, key_size,
                                                          data, data_size,
                                                          ops);
            free(data);
            if (object != NULL)
               return vk_pipeline_cache_add_object(cache, object);
         }
      }
#endif

      /* No disk cache or not found in the disk cache */
      return NULL;
   }

   if (object->ops == &raw_data_object_ops && ops != &raw_data_object_ops) {
      /* The object isn't fully formed yet and we need to deserialize it into
       * a real object before it can be used.
       */
      struct raw_data_object *data_obj =
         container_of(object, struct raw_data_object, base);

      struct vk_pipeline_cache_object *real_object =
         vk_pipeline_cache_object_deserialize(cache,
                                              data_obj->base.key_data,
                                              data_obj->base.key_size,
                                              data_obj->data,
                                              data_obj->data_size, ops);
      if (real_object == NULL) {
         vk_pipeline_cache_remove_object(cache, hash, object);
         return NULL;
      }

      object = vk_pipeline_cache_replace_object(cache, hash, object,
                                                real_object);
   }

   assert(object->ops == ops);

   return object;
}

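/* Adds the object to the cache and returns the canonical object for its
 * key.  Consumes a reference to the object passed in and produces a
 * reference: if another object with the same key is already cached, that
 * object is returned instead and the one passed in is unreferenced.  Newly
 * inserted objects are also written through to the on-disk cache when one
 * is available.
 */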
struct vk_pipeline_cache_object *
vk_pipeline_cache_add_object(struct vk_pipeline_cache *cache,
                             struct vk_pipeline_cache_object *object)
{
   assert(object->ops != NULL);

   if (cache->object_cache == NULL)
      return object;

   uint32_t hash = object_key_hash(object);

   vk_pipeline_cache_lock(cache);
   bool found = false;
   struct set_entry *entry =
      _mesa_set_search_or_add_pre_hashed(cache->object_cache,
                                         hash, object, &found);

   struct vk_pipeline_cache_object *found_object = NULL;
   if (found) {
      found_object = vk_pipeline_cache_object_ref((void *)entry->key);
   } else {
      /* The cache now owns a reference */
      vk_pipeline_cache_object_ref(object);
   }
   vk_pipeline_cache_unlock(cache);

   if (found) {
      vk_pipeline_cache_object_unref(object);
      return found_object;
   } else {
      /* If it wasn't in the object cache, it might not be in the disk cache
       * either.  Better try and add it.
       */

#ifdef ENABLE_SHADER_CACHE
      struct disk_cache *disk_cache = cache->base.device->physical->disk_cache;
      if (object->ops->serialize != NULL && disk_cache) {
         struct blob blob;
         blob_init(&blob);

         if (object->ops->serialize(object, &blob) && !blob.out_of_memory) {
            cache_key cache_key;
            disk_cache_compute_key(disk_cache, object->key_data,
                                   object->key_size, cache_key);

            disk_cache_put(disk_cache, cache_key, blob.data, blob.size, NULL);
         }

         blob_finish(&blob);
      }
#endif

      return object;
   }
}

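/* Convenience wrapper that looks up a raw data object by key and
 * deserializes it as NIR.  Returns NULL on a cache miss or if the cached
 * blob does not decode cleanly.
 */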
nir_shader *
vk_pipeline_cache_lookup_nir(struct vk_pipeline_cache *cache,
                             const void *key_data, size_t key_size,
                             const struct nir_shader_compiler_options *nir_options,
                             bool *cache_hit, void *mem_ctx)
{
   struct vk_pipeline_cache_object *object =
      vk_pipeline_cache_lookup_object(cache, key_data, key_size,
                                      &raw_data_object_ops, cache_hit);
   if (object == NULL)
      return NULL;

   struct raw_data_object *data_obj =
      container_of(object, struct raw_data_object, base);

   struct blob_reader blob;
   blob_reader_init(&blob, data_obj->data, data_obj->data_size);

   nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
   vk_pipeline_cache_object_unref(object);

   if (blob.overrun) {
      ralloc_free(nir);
      return NULL;
   }

   return nir;
}

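/* Serializes the NIR shader and stores it in the cache as a raw data object
 * under the given key.
 */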
void
vk_pipeline_cache_add_nir(struct vk_pipeline_cache *cache,
                          const void *key_data, size_t key_size,
                          const nir_shader *nir)
{
   struct blob blob;
   blob_init(&blob);

   nir_serialize(&blob, nir, false);
   if (blob.out_of_memory) {
      vk_logw(VK_LOG_OBJS(cache), "Ran out of memory serializing NIR shader");
      blob_finish(&blob);
      return;
   }

   struct raw_data_object *data_obj =
      raw_data_object_create(cache->base.device,
                             key_data, key_size,
                             blob.data, blob.size);
   blob_finish(&blob);
   if (data_obj == NULL)
      return;

   struct vk_pipeline_cache_object *cached =
      vk_pipeline_cache_add_object(cache, &data_obj->base);
   vk_pipeline_cache_object_unref(cached);
}

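/* Object types are identified on disk by the index of their ops in the
 * physical device's pipeline_cache_import_ops array.  A type of -1 means
 * "no registered type"; such entries are imported as raw data.
 */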
static int32_t
find_type_for_ops(const struct vk_physical_device *pdevice,
                  const struct vk_pipeline_cache_object_ops *ops)
{
   const struct vk_pipeline_cache_object_ops *const *import_ops =
      pdevice->pipeline_cache_import_ops;

   if (import_ops == NULL)
      return -1;

   for (int32_t i = 0; import_ops[i]; i++) {
      if (import_ops[i] == ops)
         return i;
   }

   return -1;
}

static const struct vk_pipeline_cache_object_ops *
find_ops_for_type(const struct vk_physical_device *pdevice,
                  int32_t type)
{
   const struct vk_pipeline_cache_object_ops *const *import_ops =
      pdevice->pipeline_cache_import_ops;

   if (import_ops == NULL || type < 0)
      return NULL;

   return import_ops[type];
}

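/* Parses a cache blob written by vk_common_GetPipelineCacheData().  After
 * the vk_pipeline_cache_header, the layout is:
 *
 *    uint32_t count;
 *    count times:
 *       uint32_t type;       (index into pipeline_cache_import_ops, or -1)
 *       uint32_t key_size;
 *       uint32_t data_size;
 *       char     key_data[key_size];
 *       <padding to VK_PIPELINE_CACHE_BLOB_ALIGN>
 *       char     data[data_size];
 *
 * Entries that fail to deserialize are skipped, and a header mismatch
 * (different driver version, device, or UUID) discards the whole blob.
 */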
static void
vk_pipeline_cache_load(struct vk_pipeline_cache *cache,
                       const void *data, size_t size)
{
   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct vk_pipeline_cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (memcmp(&header, &cache->header, sizeof(header)) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      int32_t type = blob_read_uint32(&blob);
      uint32_t key_size = blob_read_uint32(&blob);
      uint32_t data_size = blob_read_uint32(&blob);
      const void *key_data = blob_read_bytes(&blob, key_size);
      blob_reader_align(&blob, VK_PIPELINE_CACHE_BLOB_ALIGN);
      const void *data = blob_read_bytes(&blob, data_size);
      if (blob.overrun)
         break;

      const struct vk_pipeline_cache_object_ops *ops =
         find_ops_for_type(cache->base.device->physical, type);

      struct vk_pipeline_cache_object *object =
         vk_pipeline_cache_object_deserialize(cache,
                                              key_data, key_size,
                                              data, data_size, ops);
      if (object == NULL)
         continue;

      object = vk_pipeline_cache_add_object(cache, object);
      vk_pipeline_cache_object_unref(object);
   }
}

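/* Creates a pipeline cache.  Caching can be disabled by setting the
 * VK_ENABLE_PIPELINE_CACHE environment variable to false unless the driver
 * forces it on with info->force_enable; with caching disabled, object_cache
 * is NULL and lookups and insertions become no-ops.
 */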
struct vk_pipeline_cache *
vk_pipeline_cache_create(struct vk_device *device,
                         const struct vk_pipeline_cache_create_info *info,
                         const VkAllocationCallbacks *pAllocator)
{
   static const struct VkPipelineCacheCreateInfo default_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
   };
   struct vk_pipeline_cache *cache;

   const struct VkPipelineCacheCreateInfo *pCreateInfo =
      info->pCreateInfo != NULL ? info->pCreateInfo : &default_create_info;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_object_zalloc(device, pAllocator, sizeof(*cache),
                            VK_OBJECT_TYPE_PIPELINE_CACHE);
   if (cache == NULL)
      return NULL;

   cache->flags = pCreateInfo->flags;

   struct VkPhysicalDeviceProperties pdevice_props;
   device->physical->dispatch_table.GetPhysicalDeviceProperties(
      vk_physical_device_to_handle(device->physical), &pdevice_props);

   cache->header = (struct vk_pipeline_cache_header) {
      .header_size = sizeof(struct vk_pipeline_cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = pdevice_props.vendorID,
      .device_id = pdevice_props.deviceID,
   };
   memcpy(cache->header.uuid, pdevice_props.pipelineCacheUUID, VK_UUID_SIZE);

   simple_mtx_init(&cache->lock, mtx_plain);

   if (info->force_enable ||
       env_var_as_boolean("VK_ENABLE_PIPELINE_CACHE", true)) {
      cache->object_cache = _mesa_set_create(NULL, object_key_hash,
                                             object_keys_equal);
   }

   if (cache->object_cache && pCreateInfo->initialDataSize > 0) {
      vk_pipeline_cache_load(cache, pCreateInfo->pInitialData,
                             pCreateInfo->initialDataSize);
   }

   return cache;
}

static void
object_unref_cb(struct set_entry *entry)
{
   vk_pipeline_cache_object_unref((void *)entry->key);
}

void
vk_pipeline_cache_destroy(struct vk_pipeline_cache *cache,
                          const VkAllocationCallbacks *pAllocator)
{
   if (cache->object_cache)
      _mesa_set_destroy(cache->object_cache, object_unref_cb);
   simple_mtx_destroy(&cache->lock);
   vk_object_free(cache->base.device, pAllocator, cache);
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_CreatePipelineCache(VkDevice _device,
                              const VkPipelineCacheCreateInfo *pCreateInfo,
                              const VkAllocationCallbacks *pAllocator,
                              VkPipelineCache *pPipelineCache)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   struct vk_pipeline_cache *cache;

   struct vk_pipeline_cache_create_info info = {
      .pCreateInfo = pCreateInfo,
   };
   cache = vk_pipeline_cache_create(device, &info, pAllocator);
   if (cache == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   *pPipelineCache = vk_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

658
659VKAPI_ATTR void VKAPI_CALL
660vk_common_DestroyPipelineCache(VkDevice device,
661                               VkPipelineCache pipelineCache,
662                               const VkAllocationCallbacks *pAllocator)
663{
664   VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
665
666   if (cache == NULL)
667      return;
668
669   assert(cache->base.device == vk_device_from_handle(device));
670   vk_pipeline_cache_destroy(cache, pAllocator);
671}
672
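/* Writes the cache to the application-provided buffer in the layout
 * described above vk_pipeline_cache_load().  When pData is NULL, a fixed
 * NULL blob of size SIZE_MAX is used so that only the total size is
 * computed.  When the provided buffer is too small, VK_INCOMPLETE is
 * returned and the buffer holds however many whole objects did fit.
 */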
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_GetPipelineCacheData(VkDevice _device,
                               VkPipelineCache pipelineCache,
                               size_t *pDataSize,
                               void *pData)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   blob_write_bytes(&blob, &cache->header, sizeof(cache->header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   vk_pipeline_cache_lock(cache);

   VkResult result = VK_SUCCESS;
   if (cache->object_cache != NULL) {
      set_foreach(cache->object_cache, entry) {
         struct vk_pipeline_cache_object *object = (void *)entry->key;

         if (object->ops->serialize == NULL)
            continue;

         size_t blob_size_save = blob.size;

         int32_t type = find_type_for_ops(device->physical, object->ops);
         blob_write_uint32(&blob, type);
         blob_write_uint32(&blob, object->key_size);
         intptr_t data_size_resv = blob_reserve_uint32(&blob);
         blob_write_bytes(&blob, object->key_data, object->key_size);

         blob_align(&blob, VK_PIPELINE_CACHE_BLOB_ALIGN);

         uint32_t data_size;
         if (!vk_pipeline_cache_object_serialize(cache, object,
                                                 &blob, &data_size)) {
            blob.size = blob_size_save;
            if (blob.out_of_memory) {
               result = VK_INCOMPLETE;
               break;
            }

            /* Failed for some other reason; keep going */
            continue;
         }

         /* If the blob had run out of memory,
          * vk_pipeline_cache_object_serialize should have failed.
          */
         assert(!blob.out_of_memory);

         assert(data_size_resv >= 0);
         blob_overwrite_uint32(&blob, data_size_resv, data_size);

         count++;
      }
   }

   vk_pipeline_cache_unlock(cache);

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

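/* Merging copies references rather than objects: every source entry that is
 * missing from dst is inserted with an extra reference taken, and a raw-data
 * entry already present in dst is upgraded in place when the source holds a
 * fully deserialized object for the same key.
 */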
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_MergePipelineCaches(VkDevice device,
                              VkPipelineCache dstCache,
                              uint32_t srcCacheCount,
                              const VkPipelineCache *pSrcCaches)
{
   VK_FROM_HANDLE(vk_pipeline_cache, dst, dstCache);

   if (!dst->object_cache)
      return VK_SUCCESS;

   vk_pipeline_cache_lock(dst);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      VK_FROM_HANDLE(vk_pipeline_cache, src, pSrcCaches[i]);

      if (!src->object_cache)
         continue;

      assert(src != dst);
      if (src == dst)
         continue;

      vk_pipeline_cache_lock(src);

      set_foreach(src->object_cache, src_entry) {
         struct vk_pipeline_cache_object *src_object = (void *)src_entry->key;

         bool found_in_dst = false;
         struct set_entry *dst_entry =
            _mesa_set_search_or_add_pre_hashed(dst->object_cache,
                                               src_entry->hash,
                                               src_object, &found_in_dst);
         if (found_in_dst) {
            struct vk_pipeline_cache_object *dst_object = (void *)dst_entry->key;
            if (dst_object->ops == &raw_data_object_ops &&
                src_object->ops != &raw_data_object_ops) {
               /* Even though dst has the object, it only has the blob version
                * which isn't as useful.  Replace it with the real object.
                */
               vk_pipeline_cache_object_unref(dst_object);
               dst_entry->key = vk_pipeline_cache_object_ref(src_object);
            }
         } else {
            /* We inserted src_object in dst so it needs a reference */
            assert(dst_entry->key == (const void *)src_object);
            vk_pipeline_cache_object_ref(src_object);
         }
      }

      vk_pipeline_cache_unlock(src);
   }

   vk_pipeline_cache_unlock(dst);

   return VK_SUCCESS;
}