/*
 *
 * Copyright (c) 2014-2023 The Khronos Group Inc.
 * Copyright (c) 2014-2023 Valve Corporation
 * Copyright (c) 2014-2023 LunarG, Inc.
 * Copyright (C) 2015 Google Inc.
 * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * Copyright (c) 2023-2023 RasterGrid Kft.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Lenny Komow <lenny@lunarg.com>
 * Author: Charles Giessen <charles@lunarg.com>
 *
 */

#include "loader.h"

#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>
#include <stddef.h>

#if defined(__APPLE__)
#include <CoreFoundation/CoreFoundation.h>
#include <sys/param.h>
#endif

#include <sys/types.h>
#if defined(_WIN32)
#include "dirent_on_windows.h"
#elif COMMON_UNIX_PLATFORMS
#include <dirent.h>
#else
#warning dirent.h not available on this platform
#endif  // _WIN32

#include "allocation.h"
#include "cJSON.h"
#include "debug_utils.h"
#include "loader_environment.h"
#include "gpa_helper.h"
#include "log.h"
#include "unknown_function_handling.h"
#include "vk_loader_platform.h"
#include "wsi.h"

#if defined(WIN32)
#include "loader_windows.h"
#endif
#if defined(LOADER_ENABLE_LINUX_SORT)
// This header is currently only used when sorting Linux devices, so don't include it otherwise.
#include "loader_linux.h"
#endif  // LOADER_ENABLE_LINUX_SORT

// Generated file containing all the extension data
#include "vk_loader_extensions.c"

struct loader_struct loader = {0};

struct activated_layer_info {
    char *name;
    char *manifest;
    char *library;
    bool is_implicit;
    char *disable_env;
};

// thread safety lock for accessing global data structures such as "loader"
// all entrypoints on the instance chain need to be locked except GPA
// additionally CreateDevice and DestroyDevice need to be locked
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_preload_icd_lock;
loader_platform_thread_mutex loader_global_instance_list_lock;

// A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
// other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
// functionality, but the fact that the libraries have already been loaded causes any call that needs to load ICD libraries to
// speed up significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
// vkCreateInstance.
struct loader_icd_tramp_list scanned_icds;

// controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment
// variable - this is just the definition of the variable, usage is in vk_loader_platform.h
bool loader_disable_dynamic_library_unloading;

LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);

// Creates loader_api_version struct that contains the major and minor fields, setting patch to 0
loader_api_version loader_make_version(uint32_t version) {
    loader_api_version out_version;
    out_version.major = VK_API_VERSION_MAJOR(version);
    out_version.minor = VK_API_VERSION_MINOR(version);
    out_version.patch = 0;
    return out_version;
}

// Creates loader_api_version struct containing the major, minor, and patch fields
loader_api_version loader_make_full_version(uint32_t version) {
    loader_api_version out_version;
    out_version.major = VK_API_VERSION_MAJOR(version);
    out_version.minor = VK_API_VERSION_MINOR(version);
    out_version.patch = VK_API_VERSION_PATCH(version);
    return out_version;
}

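// Creates a loader_api_version struct directly from separate major, minor, and patch values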
loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
    loader_api_version out_version;
    out_version.major = (uint16_t)major;
    out_version.minor = (uint16_t)minor;
    out_version.patch = (uint16_t)patch;
    return out_version;
}

// Helper function for determining whether a version meets or exceeds the required minimum version
bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
    // major version is greater than the required major version
    return (version.major > required.major) ||
           // major versions are equal, minor version is greater than the required minor version
           (version.major == required.major && version.minor > required.minor) ||
           // major and minor versions are equal, patch version is greater or equal to the required patch version
           (version.major == required.major && version.minor == required.minor && version.patch >= required.patch);
}

// Wrappers around opendir & closedir so that dirent_on_windows gets the allocation callbacks it needs,
// while the POSIX opendir & closedir do not
DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
#if defined(_WIN32)
    return opendir(instance ? &instance->alloc_callbacks : NULL, name);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;
    return opendir(name);
#else
#warning dirent.h - opendir not available on this platform
#endif  // _WIN32
}
int loader_closedir(const struct loader_instance *instance, DIR *dir) {
#if defined(_WIN32)
    return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;
    return closedir(dir);
#else
#warning dirent.h - closedir not available on this platform
#endif  // _WIN32
}

bool is_json(const char *path, size_t len) {
    if (len < 5) {
        return false;
    }
    return !strncmp(path, ".json", 5);
}

// Handle an error from library loading
void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
                                      enum loader_layer_library_status *lib_status) {
    const char *error_message = loader_platform_open_library_error(filename);
    // If the error is due to an incompatible architecture (e.g. 32 bit vs 64 bit), report it with INFO level
    // Discussed in Github issue 262 & 644
    // "wrong ELF class" is a linux error, " with error 193" is a windows error
    VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
    if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
        err_flag = VULKAN_LOADER_INFO_BIT;
        if (NULL != lib_status) {
            *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
        }
    }
    // Check if the error is due to lack of memory
    // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY
    // Linux doesn't have such a specific error message, so this check is primarily relevant on Windows
    else if (strstr(error_message, " with error 8") != NULL) {
        if (NULL != lib_status) {
            *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY;
        }
    } else if (NULL != lib_status) {
        *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
    }
    loader_log(inst, err_flag, 0, error_message);
}

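// Set the given object's dispatch pointer to the loader dispatch table of the given instance.
// Returns VK_ERROR_INITIALIZATION_FAILED if the loader_instance cannot be found for the handle.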
VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
    struct loader_instance *inst = loader_get_instance(instance);
    if (!inst) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    loader_set_dispatch(object, inst->disp);
    return VK_SUCCESS;
}

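// Set the given object's dispatch pointer to the loader dispatch table of the given device.
// Returns VK_ERROR_INITIALIZATION_FAILED if the loader_device cannot be found for the handle.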
VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
    struct loader_device *dev;
    struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);

    if (NULL == icd_term || NULL == dev) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    loader_set_dispatch(object, &dev->loader_dispatch);
    return VK_SUCCESS;
}

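// Free every allocation held by a loader_layer_properties struct and zero it so the slot can be reused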
void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
    loader_instance_heap_free(inst, layer_properties->manifest_file_name);
    loader_instance_heap_free(inst, layer_properties->lib_name);
    loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
    loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
    loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
    if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
        for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
            free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
        }
    }
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
    free_string_list(inst, &layer_properties->component_layer_names);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
    free_string_list(inst, &layer_properties->override_paths);
    free_string_list(inst, &layer_properties->blacklist_layer_names);
    free_string_list(inst, &layer_properties->app_key_paths);

    // Make sure to clear out the removed layer, in case new layers are added in the previous location
    memset(layer_properties, 0, sizeof(struct loader_layer_properties));
}

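// Allocate a zeroed array of library handles, one per layer in instance_layers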
VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) {
    if (instance_layers->count > 0) {
        *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
        if (*libs == NULL) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    }
    return VK_SUCCESS;
}

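// Allocate a new string on the instance heap and copy source_str (including the terminating NUL) into it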
VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) {
    assert(source_str && dest_str);
    size_t str_len = strlen(source_str) + 1;
    *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY;
    loader_strncpy(*dest_str, str_len, source_str, str_len);
    (*dest_str)[str_len - 1] = 0;
    return VK_SUCCESS;
}

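// Allocate a loader_string_list with room for allocated_count char pointers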
VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) {
    assert(string_list);
    string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == string_list->list) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    string_list->allocated_count = allocated_count;
    string_list->count = 0;
    return VK_SUCCESS;
}

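// Append a string pointer to the string list, allocating or growing the backing array as needed.
// On success the list takes ownership of str.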
VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
    assert(string_list && str);
    if (string_list->allocated_count == 0) {
        string_list->allocated_count = 32;
        string_list->list =
            loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == string_list->list) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    } else if (string_list->count + 1 > string_list->allocated_count) {
        uint32_t new_allocated_count = string_list->allocated_count * 2;
        string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
                                                         sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == string_list->list) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        // Null out the new space
        memset(string_list->list + string_list->allocated_count, 0, string_list->allocated_count);
        string_list->allocated_count *= 2;
    }
    string_list->list[string_list->count++] = str;
    return VK_SUCCESS;
}

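// Copy the first str_len characters of str into a newly allocated, NUL terminated string and append it to the list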
VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str,
                                 size_t str_len) {
    assert(string_list && str);
    char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == new_str) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len);
    new_str[str_len] = '\0';
    VkResult res = append_str_to_string_list(inst, string_list, new_str);
    if (res != VK_SUCCESS) {
        // Cleanup new_str if the append failed - as append_str_to_string_list takes ownership but not if the function fails
        loader_instance_heap_free(inst, new_str);
    }
    return res;
}

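// Free every string in the list, then the list itself, and zero the structure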
void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) {
    assert(string_list);
    if (string_list->list) {
        for (uint32_t i = 0; i < string_list->count; i++) {
            loader_instance_heap_free(inst, string_list->list[i]);
            string_list->list[i] = NULL;
        }
        loader_instance_heap_free(inst, string_list->list);
    }
    memset(string_list, 0, sizeof(struct loader_string_list));
}

// Given a string of the three part form "maj.min.pat", convert it to a Vulkan version number.
// Also understands the four part form "variant.major.minor.patch" if provided.
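// For example, "1.3.204" yields VK_MAKE_API_VERSION(0, 1, 3, 204), and "0.1.3.204" yields the same value.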
uint32_t loader_parse_version_string(char *vers_str) {
    uint32_t variant = 0, major = 0, minor = 0, patch = 0;
    char *vers_tok;
    char *context = NULL;
    if (!vers_str) {
        return 0;
    }

    vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context);
    if (NULL != vers_tok) {
        major = (uint16_t)atoi(vers_tok);
        vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
        if (NULL != vers_tok) {
            minor = (uint16_t)atoi(vers_tok);
            vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
            if (NULL != vers_tok) {
                patch = (uint16_t)atoi(vers_tok);
                vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
                // check that we are using a 4 part version string
                if (NULL != vers_tok) {
                    // if we are, move the values over into the correct place
                    variant = major;
                    major = minor;
                    minor = patch;
                    patch = (uint16_t)atoi(vers_tok);
                }
            }
        }
    }

    return VK_MAKE_API_VERSION(variant, major, minor, patch);
}

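// Compare two VkExtensionProperties by extension name only; returns true when the names match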
bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
    return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
}

// Search the given ext_array for an extension matching the given vk_ext_prop
bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
                                     const VkExtensionProperties *ext_array) {
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
    }
    return false;
}

// Search the given ext_list for an extension matching the given vk_ext_prop
bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
    }
    return false;
}

// Search the given ext_list for a device extension matching the given ext_prop
bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
    }
    return false;
}

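// Append a layer_property struct to layer_list by copy, growing the list as needed. On success the source
// layer_property is zeroed (its allocations now belong to the list); on failure its allocations are freed.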
VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                      struct loader_layer_properties *layer_property) {
    VkResult res = VK_SUCCESS;
    if (layer_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // Ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        layer_list->list = new_ptr;
        memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
        layer_list->capacity *= 2;
    }
    memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
    layer_list->count++;
    memset(layer_property, 0, sizeof(struct loader_layer_properties));
out:
    if (res != VK_SUCCESS) {
        loader_free_layer_properties(inst, layer_property);
    }
    return res;
}

// Search the given layer list for a layer property matching the given layer name
struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
    }
    return NULL;
}

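// Search the given pointer layer list for a layer property matching the given layer name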
struct loader_layer_properties *loader_find_pointer_layer_property(const char *name,
                                                                   const struct loader_pointer_layer_list *layer_list) {
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i]->info;
        if (strcmp(name, item->layerName) == 0) return layer_list->list[i];
    }
    return NULL;
}

// Search the given layer list for a layer matching the given layer name
bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
    if (NULL == layer_list) {
        return false;
    }
    if (NULL != loader_find_pointer_layer_property(name, layer_list)) {
        return true;
    }
    return false;
}

// Search the given meta-layer's component list for a layer matching the given layer name
bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
                                          struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
    for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) {
        if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) {
            return true;
        }
        struct loader_layer_properties *comp_layer_props =
            loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list);
        // Guard against a component layer that is not present in the list before checking whether it is itself a meta-layer
        if (NULL != comp_layer_props && (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
        }
    }
    return false;
}

// Search the override layer's blacklist for a layer matching the given layer name
bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) {
    for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) {
        if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) {
            return true;
        }
    }
    return false;
}

// Remove all layer properties entries from the list
void loader_delete_layer_list_and_properties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    uint32_t i;
    if (!layer_list) return;

    for (i = 0; i < layer_list->count; i++) {
        if (layer_list->list[i].lib_handle) {
            loader_platform_close_library(layer_list->list[i].lib_handle);
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
                       layer_list->list[i].lib_name);
            layer_list->list[i].lib_handle = NULL;
        }
        loader_free_layer_properties(inst, &(layer_list->list[i]));
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_instance_heap_free(inst, layer_list->list);
    }
    memset(layer_list, 0, sizeof(struct loader_layer_list));
}

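// Free the layer at the given index and compact the remaining entries down over it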
void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                 uint32_t layer_to_remove) {
    if (layer_list == NULL || layer_to_remove >= layer_list->count) {
        return;
    }
    loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));

    // Remove the layer from the layer list.  Use memmove since we are
    // overlapping the source and destination addresses.
    memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
            sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));

    // Decrement the count (because we now have one less).  Callers iterating over the list must also
    // decrement their loop index so the entry that moved into this position gets re-checked.
    layer_list->count--;
}

// Remove all layers in the layer list that are blacklisted by the override layer.
// NOTE: This should only be called if an override layer is found and not expired.
void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
    if (NULL == override_prop) {
        return;
    }

    for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
        struct loader_layer_properties cur_layer_prop = layer_list->list[j];
        const char *cur_layer_name = &cur_layer_prop.info.layerName[0];

        // Skip the override layer itself.
        if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
            continue;
        }

        // If found in the override layer's blacklist, remove it
        if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) {
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
                       "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
                       "Removing that layer from current layer list.",
                       cur_layer_name);
            loader_remove_layer_in_list(inst, layer_list, j);
            j--;

            // Re-do the query for the override layer
            override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
        }
    }
}

// Remove all layers in the layer list that are not found inside any implicit meta-layers.
void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    int32_t i;
    int32_t j;
    int32_t layer_count = (int32_t)(layer_list->count);

    for (i = 0; i < layer_count; i++) {
        layer_list->list[i].keep = false;
    }

    for (i = 0; i < layer_count; i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];

        if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            cur_layer_prop->keep = true;
            continue;
        }
        for (j = 0; j < layer_count; j++) {
            struct loader_layer_properties *layer_to_check = &layer_list->list[j];

            if (i == j) {
                continue;
            }

            if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                // For all layers found in this meta layer, we want to keep them as well.
                if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
                    cur_layer_prop->keep = true;
                }
            }
        }
    }

    // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
    // dynamically updated if we delete a layer property in the list).
    for (i = 0; i < (int32_t)(layer_list->count); i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
        if (!cur_layer_prop->keep) {
            loader_log(
                inst, VULKAN_LOADER_DEBUG_BIT, 0,
                "loader_remove_layers_not_in_implicit_meta_layers: Implicit meta-layers are active, and layer %s is not listed "
                "inside of any.  So removing layer from current layer list.",
                cur_layer_prop->info.layerName);
            loader_remove_layer_in_list(inst, layer_list, i);
            i--;
        }
    }
}

VkResult loader_add_instance_extensions(const struct loader_instance *inst,
                                        const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
                                        struct loader_extension_list *ext_list) {
    uint32_t i, count = 0;
    VkExtensionProperties *ext_props;
    VkResult res = VK_SUCCESS;

    if (!fp_get_props) {
        // No EnumerateInstanceExtensionProperties defined
        goto out;
    }

    // Make sure we never call ourselves by accident; this should never happen outside of error paths
    if (fp_get_props == vkEnumerateInstanceExtensionProperties) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would "
                   "lead to infinite recursion.",
                   lib_name);
        goto out;
    }

    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
        goto out;
    }

    if (count == 0) {
        // No ExtensionProperties to report
        goto out;
    }

    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
    if (NULL == ext_props) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
                   lib_name);
        goto out;
    }

    for (i = 0; i < count; i++) {
        bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
        if (!ext_unsupported) {
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                goto out;
            }
        }
    }

out:
    return res;
}

VkResult loader_add_device_extensions(const struct loader_instance *inst,
                                      PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
                                      VkPhysicalDevice physical_device, const char *lib_name,
                                      struct loader_extension_list *ext_list) {
    uint32_t i = 0, count = 0;
    VkResult res = VK_SUCCESS;
    VkExtensionProperties *ext_props = NULL;

    res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
        return res;
    }
    if (count > 0) {
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
        if (!ext_props) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
                       lib_name);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
        if (res != VK_SUCCESS) {
            return res;
        }
        for (i = 0; i < count; i++) {
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                return res;
            }
        }
    }

    return VK_SUCCESS;
}

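// Initialize a generic list with an initial capacity of 32 elements of the given element size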
VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
    size_t capacity = 32 * element_size;
    list_info->count = 0;
    list_info->capacity = 0;
    list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list_info->list == NULL) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    list_info->capacity = capacity;
    return VK_SUCCESS;
}

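// Free the generic list's backing storage and zero the structure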
void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
    loader_instance_heap_free(inst, list->list);
    memset(list, 0, sizeof(struct loader_generic_list));
}

// Append non-duplicate extension properties defined in props to the given ext_list.
// Return - VK_SUCCESS on success
VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
                                uint32_t prop_list_count, const VkExtensionProperties *props) {
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
        if (VK_SUCCESS != res) {
            return res;
        }
    }

    for (uint32_t i = 0; i < prop_list_count; i++) {
        const VkExtensionProperties *cur_ext = &props[i];

        // look for duplicates
        if (has_vk_extension_property(cur_ext, ext_list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
            void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (new_ptr == NULL) {
                loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                           "loader_add_to_ext_list: Failed to reallocate space for extension list");
                return VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            ext_list->list = new_ptr;

            // double capacity
            ext_list->capacity *= 2;
        }

        memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
        ext_list->count++;
    }
    return VK_SUCCESS;
}

// Append one extension property defined in props with entrypoints defined in entries to the given
// ext_list. Do not append if it is a duplicate.
// If this is a duplicate, this function frees the passed in entries - that is, it takes ownership of that list (if it is not
// NULL). Return - VK_SUCCESS on success
VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
                                    const VkExtensionProperties *props, struct loader_string_list *entrys) {
    VkResult res = VK_SUCCESS;
    bool should_free_entrys = true;
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // look for duplicates
    if (has_vk_dev_ext_property(props, ext_list)) {
        goto out;
    }

    uint32_t idx = ext_list->count;
    // add to list at end
    // check for enough capacity
    if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        ext_list->list = new_ptr;

        // double capacity
        ext_list->capacity *= 2;
    }

    memcpy(&ext_list->list[idx].props, props, sizeof(*props));
    if (entrys) {
        ext_list->list[idx].entrypoints = *entrys;
        should_free_entrys = false;
    }
    ext_list->count++;
out:
    if (NULL != entrys && should_free_entrys) {
        free_string_list(inst, entrys);
    }
    return res;
}

// Create storage for pointers to loader_layer_properties
bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) {
    list->capacity = 32 * sizeof(void *);
    list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    list->count = 0;
    return true;
}

// Search the given array of layer names for an entry matching the given VkLayerProperties
bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
                                           struct activated_layer_info *layer_info) {
    for (uint32_t i = 0; i < layer_info_count; i++) {
        if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) {
            return true;
        }
    }
    return false;
}

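// Free the pointer layer list's backing array and zero the structure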
void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) {
    loader_instance_heap_free(inst, layer_list->list);
    memset(layer_list, 0, sizeof(struct loader_pointer_layer_list));
}

// Append the given layer properties pointer to the given pointer layer list, growing the list as needed
VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list,
                                             struct loader_layer_properties *props) {
    if (list->list == NULL || list->capacity == 0) {
        if (!loader_init_pointer_layer_list(inst, list)) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    }

    // Check for enough capacity
    if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
        size_t new_capacity = list->capacity * 2;
        void *new_ptr =
            loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_layer_properties_to_list: Realloc failed when attempting to add a new layer");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        list->list = new_ptr;
        list->capacity = new_capacity;
    }
    list->list[list->count++] = props;

    return VK_SUCCESS;
}

// Determine if the provided explicit layer should be available by querying the appropriate environment variables.
bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                               const struct loader_layer_properties *prop) {
    bool available = true;
    bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER));
    bool disabled_by_type =
        (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit);
    if ((filters->disable_filter.disable_all || disabled_by_type ||
         check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
        !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
        available = false;
    }
    if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
        available = true;
    } else if (!available) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
                   VK_LAYERS_DISABLE_ENV_VAR);
    }

    return available;
}

// Search the given source_list for each of the named layers.  Add those that are found to the
// output layer lists.
VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                                        struct loader_pointer_layer_list *output_list,
                                        struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count,
                                        const char *const *names, const struct loader_layer_list *source_list) {
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *source_name = names[i];

        struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list);
        if (NULL == layer_prop) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        // Make sure the layer isn't already in the output_list, skip adding it if it is.
        if (loader_find_layer_name_in_list(source_name, output_list)) {
            continue;
        }

        if (!loader_layer_is_available(inst, filters, layer_prop)) {
            continue;
        }

        // If not a meta-layer, simply add it.
        if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            err = loader_add_layer_properties_to_list(inst, output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
            err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        } else {
            err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        }
    }

    return err;
}

// Determine if the provided implicit layer should be enabled by querying the appropriate environment variables.
// For an implicit layer, at least a disable environment variable is required.
bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                                      const struct loader_layer_properties *prop) {
    bool enable = false;
    bool forced_disabled = false;
    bool forced_enabled = false;

    if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit ||
         check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
        !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
        forced_disabled = true;
    }
    if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
        forced_enabled = true;
    }

    // If no enable_environment variable is specified, this implicit layer is always enabled by default.
    if (NULL == prop->enable_env_var.name) {
        enable = true;
    } else {
        char *env_value = loader_getenv(prop->enable_env_var.name, inst);
        if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
            enable = true;
        }

        // Otherwise, only enable this layer if the enable environment variable is defined
        loader_free_getenv(env_value, inst);
    }

    if (forced_enabled) {
        // Only report a message that we've forced on a layer if it wouldn't have been enabled
        // normally.
        if (!enable) {
            enable = true;
            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName,
                       VK_LAYERS_ENABLE_ENV_VAR);
        }
    } else if (enable && forced_disabled) {
        enable = false;
        // Report a message that we've forced off a layer if it would have been enabled normally.
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
                   VK_LAYERS_DISABLE_ENV_VAR);
        return enable;
    }

    // The disable_environment has priority over everything else.  If it is defined, the layer is always
    // disabled.
    if (NULL != prop->disable_env_var.name) {
        char *env_value = loader_getenv(prop->disable_env_var.name, inst);
        if (NULL != env_value) {
            enable = false;
        }
        loader_free_getenv(env_value, inst);
    } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Implicit layer \"%s\" missing disable_environment variable!", prop->info.layerName);
    }

    // Enable this layer if it is included in the override layer
    if (inst != NULL && inst->override_layer_present) {
        struct loader_layer_properties *override = NULL;
        for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
            if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
                override = &inst->instance_layer_list.list[i];
                break;
            }
        }
        if (override != NULL) {
            for (uint32_t i = 0; i < override->component_layer_names.count; ++i) {
                if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) {
                    enable = true;
                    break;
                }
            }
        }
    }

    return enable;
}

// Check the individual implicit layer for the enable/disable environment variable settings.  Only add it after
// every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been
// added.
VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop,
                                   const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list,
                                   struct loader_pointer_layer_list *expanded_target_list,
                                   const struct loader_layer_list *source_list) {
    VkResult result = VK_SUCCESS;
    if (loader_implicit_layer_is_enabled(inst, filters, prop)) {
        if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            // Make sure the layer isn't already in the output_list, skip adding it if it is.
            if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) {
                return result;
            }

            result = loader_add_layer_properties_to_list(inst, target_list, prop);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
            if (NULL != expanded_target_list) {
                result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop);
            }
        } else {
            result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL);
        }
    }
    return result;
}

// Add the component layers of a meta-layer to the active list of layers
VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                               struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list,
                               struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list,
                               bool *out_found_all_component_layers) {
    VkResult result = VK_SUCCESS;
    bool found_all_component_layers = true;

    // We need to add all the individual component layers
    loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
    for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
        struct loader_layer_properties *search_prop =
            loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list);
        if (search_prop != NULL) {
1059             loader_api_version search_prop_version = loader_make_version(search_prop->info.specVersion);
1060             if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) {
1061                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1062                            "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have "
1063                            "incompatibilities (Policy #LLP_LAYER_8)!",
1064                            prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor,
1065                            search_prop->info.layerName, search_prop_version.major, search_prop_version.minor);
1066             }
1067 
1068             if (!loader_layer_is_available(inst, filters, search_prop)) {
1069                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1070                            "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName);
1071                 continue;
1072             }
1073 
1074             // If the component layer is itself an implicit layer, we need to do the implicit layer enable
1075             // checks
1076             if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
1077                 result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list);
1078                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1079             } else {
1080                 if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1081                     bool found_layers_in_component_meta_layer = true;
1082                     result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list,
1083                                                    &found_layers_in_component_meta_layer);
1084                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1085                     if (!found_layers_in_component_meta_layer) found_all_component_layers = false;
1086                 } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) {
1087                     // Make sure the layer isn't already in the output_list, skip adding it if it is.
1088                     result = loader_add_layer_properties_to_list(inst, target_list, search_prop);
1089                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1090                     if (NULL != expanded_target_list) {
1091                         result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop);
1092                         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1093                     }
1094                 }
1095             }
1096         } else {
1097             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1098                        "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)",
1099                        prop->info.layerName, prop->component_layer_names.list[comp_layer]);
1100             found_all_component_layers = false;
1101         }
1102     }
1103 
1104     // Add this layer to the overall target list (not the expanded one)
1105     if (found_all_component_layers) {
1106         result = loader_add_layer_properties_to_list(inst, target_list, prop);
1107         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1108         // Write the result to out_found_all_component_layers in case this function is being recursed
1109         if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers;
1110     }
1111 
1112     return result;
1113 }
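// Illustrative sketch (not part of this file; layer names are hypothetical): a meta-layer is declared in its
// JSON manifest with a "component_layers" array instead of a "library_path", roughly:
//
//     {
//         "file_format_version": "1.1.2",
//         "layer": {
//             "name": "VK_LAYER_EXAMPLE_meta",
//             "type": "GLOBAL",
//             "api_version": "1.3.0",
//             "implementation_version": "1",
//             "description": "Example meta-layer",
//             "component_layers": [ "VK_LAYER_KHRONOS_validation", "VK_LAYER_EXAMPLE_other" ]
//         }
//     }
//
// loader_add_meta_layer() above walks that component list, activating each entry it finds in source_list and
// recursing when a component is itself a meta-layer.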
1114 
1115 VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
1116     for (uint32_t i = 0; i < list->count; i++) {
1117         if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
1118     }
1119     return NULL;
1120 }
1121 
1122 VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
1123     for (uint32_t i = 0; i < list->count; i++) {
1124         if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
1125     }
1126     return NULL;
1127 }
1128 
1129 // For Instance extensions implemented within the loader (e.g. VK_EXT_debug_report),
1130 // the extension must provide two entry points for the loader to use:
1131 // - "trampoline" entry point - this is the address returned by GetProcAddr
1132 //                              and will always do what's necessary to support a
1133 //                              global call.
1134 // - "terminator" function    - this function will be put at the end of the
1135 //                              instance chain and will contain the necessary logic
1136 //                              to call / process the extension for the appropriate
1137 //                              ICDs that are available.
1138 // There is no generic mechanism for including these functions, the references
1139 // must be placed into the appropriate loader entry points.
1140 // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
1141 // requests
1142 // loader_coalesce_extensions(void) - add extension records to the list of global
1143 //                                    extension available to the app.
1144 // instance_disp                    - add function pointer for terminator function
1145 //                                    to this array.
1146 // The extension itself should be in a separate file that will be linked directly
1147 // with the loader.
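// Illustrative sketch only (ExampleEXT and the dispatch member names below are placeholders, not the loader's
// real entry points): the trampoline/terminator pair for a hypothetical loader-implemented instance function
// has roughly this shape:
//
//     // trampoline - handed out by vkGetInstanceProcAddr, enters the top of the instance call chain
//     VKAPI_ATTR void VKAPI_CALL ExampleEXT(VkInstance instance) {
//         struct loader_instance *inst = loader_get_instance(instance);
//         inst->disp->layer_inst_disp.ExampleEXT(instance);
//     }
//
//     // terminator - sits at the bottom of the chain and fans the call out to each ICD that supports it
//     VKAPI_ATTR void VKAPI_CALL terminator_ExampleEXT(VkInstance instance) {
//         struct loader_instance *inst = loader_get_instance(instance);
//         for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next) {
//             if (NULL != icd_term->dispatch.ExampleEXT) icd_term->dispatch.ExampleEXT(icd_term->instance);
//         }
//     }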
1148 VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1149                                                    struct loader_extension_list *inst_exts) {
1150     struct loader_extension_list icd_exts;
1151     VkResult res = VK_SUCCESS;
1152     char *env_value;
1153     bool filter_extensions = true;
1154 
1155     // Check if a user wants to disable the instance extension filtering behavior
1156     env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
1157     if (NULL != env_value && atoi(env_value) != 0) {
1158         filter_extensions = false;
1159     }
1160     loader_free_getenv(env_value, inst);
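    // For example, launching an application with VK_LOADER_DISABLE_INST_EXT_FILTER=1 exposes instance extensions
    // that drivers report but the loader does not recognize; by default those are filtered out below.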
1161 
1162     // traverse scanned icd list adding non-duplicate extensions to the list
1163     for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1164         res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
1165         if (VK_SUCCESS != res) {
1166             goto out;
1167         }
1168         res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
1169                                              icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
1170         if (VK_SUCCESS == res) {
1171             if (filter_extensions) {
1172                 // Remove any extensions not recognized by the loader
1173                 for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
1174                     // See if the extension is in the list of supported extensions
1175                     bool found = false;
1176                     for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
1177                         if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
1178                             found = true;
1179                             break;
1180                         }
1181                     }
1182 
1183                     // If it isn't in the list, remove it
1184                     if (!found) {
1185                         for (uint32_t k = j + 1; k < icd_exts.count; k++) {
1186                             icd_exts.list[k - 1] = icd_exts.list[k];
1187                         }
1188                         --icd_exts.count;
1189                         --j;
1190                     }
1191                 }
1192             }
1193 
1194             res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
1195         }
1196         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
1197         if (VK_SUCCESS != res) {
1198             goto out;
1199         }
1200     }
1201 
1202     // Traverse loader's extensions, adding non-duplicate extensions to the list
1203     res = add_debug_extensions_to_ext_list(inst, inst_exts);
1204     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1205         goto out;
1206     }
1207     const VkExtensionProperties portability_enumeration_extension_info[] = {
1208         {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};
1209 
1210     // Add VK_KHR_portability_enumeration
1211     res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
1212                                  portability_enumeration_extension_info);
1213     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1214         goto out;
1215     }
1216 
1217     const VkExtensionProperties direct_driver_loading_extension_info[] = {
1218         {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}};
1219 
1220     // Add VK_LUNARG_direct_driver_loading
1221     res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties),
1222                                  direct_driver_loading_extension_info);
1223     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1224         goto out;
1225     }
1226 
1227 out:
1228     return res;
1229 }
1230 
1231 struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) {
1232     VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device);
1233     if (NULL == dispatch_table_device) {
1234         *found_dev = NULL;
1235         return NULL;
1236     }
1237     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
1238     *found_dev = NULL;
1239 
1240     for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
1241         uint32_t index = 0;
1242         for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
1243             for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) {
1244                 // Value comparison of device prevents object wrapping by layers
1245                 if (loader_get_dispatch(dev->icd_device) == dispatch_table_device ||
1246                     (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) {
1247                     *found_dev = dev;
1248                     if (NULL != icd_index) {
1249                         *icd_index = index;
1250                     }
1251                     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1252                     return icd_term;
1253                 }
1254             }
1255             index++;
1256         }
1257     }
1258     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1259     return NULL;
1260 }
1261 
1262 void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) {
1263     if (pAllocator) {
1264         dev->alloc_callbacks = *pAllocator;
1265     }
1266     loader_device_heap_free(dev, dev);
1267 }
1268 
1269 struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
1270     struct loader_device *new_dev;
1271     new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1272 
1273     if (!new_dev) {
1274         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device");
1275         return NULL;
1276     }
1277 
1278     new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;
1279 
1280     if (pAllocator) {
1281         new_dev->alloc_callbacks = *pAllocator;
1282     }
1283 
1284     return new_dev;
1285 }
1286 
1287 void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) {
1288     dev->next = icd_term->logical_device_list;
1289     icd_term->logical_device_list = dev;
1290 }
1291 
1292 void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev,
1293                                   const VkAllocationCallbacks *pAllocator) {
1294     struct loader_device *dev, *prev_dev;
1295 
1296     if (!icd_term || !found_dev) return;
1297 
1298     prev_dev = NULL;
1299     dev = icd_term->logical_device_list;
1300     while (dev && dev != found_dev) {
1301         prev_dev = dev;
1302         dev = dev->next;
1303     }
1304 
1305     if (prev_dev)
1306         prev_dev->next = found_dev->next;
1307     else
1308         icd_term->logical_device_list = found_dev->next;
1309     loader_destroy_logical_device(found_dev, pAllocator);
1310 }
1311 
1312 void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
1313                         const VkAllocationCallbacks *pAllocator) {
1314     ptr_inst->total_icd_count--;
1315     for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
1316         struct loader_device *next_dev = dev->next;
1317         loader_destroy_logical_device(dev, pAllocator);
1318         dev = next_dev;
1319     }
1320 
1321     loader_instance_heap_free(ptr_inst, icd_term);
1322 }
1323 
1324 struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
1325     struct loader_icd_term *icd_term;
1326 
1327     icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1328     if (!icd_term) {
1329         return NULL;
1330     }
1331 
1332     icd_term->scanned_icd = scanned_icd;
1333     icd_term->this_instance = ptr_inst;
1334 
1335     // Prepend to the list
1336     icd_term->next = ptr_inst->icd_terms;
1337     ptr_inst->icd_terms = icd_term;
1338     ptr_inst->total_icd_count++;
1339 
1340     return icd_term;
1341 }
1342 
1343 // Determine the ICD interface version to use.
1344 //     @param icd
1345 //     @param pVersion Output parameter indicating which version to use or 0 if
1346 //            the negotiation API is not supported by the ICD
1347 //     @return  bool indicating true if the selected interface version is supported
1348 //            by the loader, false indicates the version is not supported
1349 bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
1350     if (fp_negotiate_icd_version == NULL) {
1351         // ICD does not support the negotiation API, it supports version 0 or 1
1352         // calling code must determine if it is version 0 or 1
1353         *pVersion = 0;
1354     } else {
1355         // ICD supports the negotiation API, so call it with the loader's
1356         // latest version supported
1357         *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
1358         VkResult result = fp_negotiate_icd_version(pVersion);
1359 
1360         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1361             // ICD no longer supports the loader's latest interface version so
1362             // fail loading the ICD
1363             return false;
1364         }
1365     }
1366 
1367 #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
1368     if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
1369         // Loader no longer supports the ICD's latest interface version so fail
1370         // loading the ICD
1371         return false;
1372     }
1373 #endif
1374     return true;
1375 }
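// Illustrative sketch of the driver-side counterpart (hypothetical driver code, not part of this file): an ICD
// that implements Loader Driver Interface versions up to 5 would typically export
//
//     VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion) {
//         if (*pSupportedVersion > 5) {
//             *pSupportedVersion = 5;  // clamp to the newest version the driver implements
//         }
//         return VK_SUCCESS;  // or VK_ERROR_INCOMPATIBLE_DRIVER if the loader's version is unusably old
//     }
//
// loader_get_icd_interface_version() above passes in CURRENT_LOADER_ICD_INTERFACE_VERSION and reads back whatever
// value the driver clamped it to.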
1376 
1377 void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1378     if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) {
1379         for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1380             if (icd_tramp_list->scanned_list[i].handle) {
1381                 loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
1382                 icd_tramp_list->scanned_list[i].handle = NULL;
1383             }
1384             loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
1385         }
1386         loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
1387     }
1388     memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list));
1389 }
1390 
1391 VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1392     VkResult res = VK_SUCCESS;
1393     loader_scanned_icd_clear(inst, icd_tramp_list);
1394     icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
1395     icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1396     if (NULL == icd_tramp_list->scanned_list) {
1397         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1398                    "loader_scanned_icd_init: Failed to allocate memory for the scanned ICD list");
1399         res = VK_ERROR_OUT_OF_HOST_MEMORY;
1400     }
1401     return res;
1402 }
1403 
1404 VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index,
1405                                   const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) {
1406     // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array
1407     // of VkDirectDriverLoadingInfoLUNARG structures is non-null.
1408     if (NULL == pDriver->pfnGetInstanceProcAddr) {
1409         loader_log(
1410             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1411             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the "
1412             "pfnGetInstanceProcAddr member, skipping.",
1413             index);
1414         return VK_ERROR_INITIALIZATION_FAILED;
1415     }
1416 
1417     PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr;
1418     PFN_vkCreateInstance fp_create_inst = NULL;
1419     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1420     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1421     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1422 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1423     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1424 #endif
1425     struct loader_scanned_icd *new_scanned_icd;
1426     uint32_t interface_version = 0;
1427 
1428     // Try to get the negotiate ICD interface version function
1429     fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr(
1430         NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1431 
1432     if (NULL == fp_negotiate_icd_version) {
1433         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1434                    "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from "
1435                    "VkDirectDriverLoadingInfoLUNARG structure at "
1436                    "index %d, skipping.",
1437                    index);
1438         return VK_ERROR_INITIALIZATION_FAILED;
1439     }
1440 
1441     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) {
1442         loader_log(
1443             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1444             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
1445             "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
1446             "extension, skipping.",
1447             index, interface_version);
1448         return VK_ERROR_INITIALIZATION_FAILED;
1449     }
1450 
1451     if (interface_version < 7) {
1452         loader_log(
1453             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1454             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
1455             "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
1456             "extension, skipping.",
1457             index, interface_version);
1458         return VK_ERROR_INITIALIZATION_FAILED;
1459     }
1460 
1461     fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance");
1462     if (NULL == fp_create_inst) {
1463         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1464                    "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at "
1465                    "index %d, skipping.",
1466                    index);
1467         return VK_ERROR_INITIALIZATION_FAILED;
1468     }
1469     fp_get_inst_ext_props =
1470         (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
1471     if (NULL == fp_get_inst_ext_props) {
1472         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1473                    "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from "
1474                    "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.",
1475                    index);
1476         return VK_ERROR_INITIALIZATION_FAILED;
1477     }
1478 
1479     fp_get_phys_dev_proc_addr =
1480         (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1481 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1482     // Query "vk_icdEnumerateAdapterPhysicalDevices" through the driver's pfnGetInstanceProcAddr. Direct drivers have no
1483     // library handle and must report interface version 7 or greater, so there is no dynamic-linker fallback here.
1484     fp_enum_dxgi_adapter_phys_devs =
1485         (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1486 #endif
1487 
1488     // check for enough capacity
1489     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1490         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1491                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1492         if (NULL == new_ptr) {
1493             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1494                        "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index);
1495             return VK_ERROR_OUT_OF_HOST_MEMORY;
1496         }
1497         icd_tramp_list->scanned_list = new_ptr;
1498 
1499         // double capacity
1500         icd_tramp_list->capacity *= 2;
1501     }
1502 
1503     // Driver must be 1.1 to support version 7
1504     uint32_t api_version = VK_API_VERSION_1_1;
1505     PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
1506         (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
1507 
1508     if (icd_enumerate_instance_version) {
1509         VkResult res = icd_enumerate_instance_version(&api_version);
1510         if (res != VK_SUCCESS) {
1511             return res;
1512         }
1513     }
1514 
1515     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1516     new_scanned_icd->handle = NULL;
1517     new_scanned_icd->api_version = api_version;
1518     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1519     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1520     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1521     new_scanned_icd->CreateInstance = fp_create_inst;
1522 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1523     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1524 #endif
1525     new_scanned_icd->interface_version = interface_version;
1526 
1527     new_scanned_icd->lib_name = NULL;
1528     icd_tramp_list->count++;
1529 
1530     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1531                "loader_add_direct_driver: Adding driver found in index %d of "
1532                "VkDirectDriverLoadingListLUNARG::pDrivers structure. pfnGetInstanceProcAddr was set to %p",
1533                index, pDriver->pfnGetInstanceProcAddr);
1534 
1535     return VK_SUCCESS;
1536 }
1537 
1538 // Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them.
1539 VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
1540                                         struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) {
1541     if (NULL == pCreateInfo) {
1542         // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null
1543         return VK_SUCCESS;
1544     }
1545     bool direct_driver_loading_enabled = false;
1546     // Check whether VK_LUNARG_direct_driver_loading is enabled and whether we are using it exclusively
1547     // Skip this step if inst is NULL, aka when this function is being called before instance creation
1548     if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) {
1549         // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present
1550         for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1551             if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) {
1552                 direct_driver_loading_enabled = true;
1553                 break;
1554             }
1555         }
1556     }
1557     const VkDirectDriverLoadingListLUNARG *ddl_list = NULL;
1558     // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of vkInstanceCreateInfo
1559     const VkBaseOutStructure *chain = pCreateInfo->pNext;
1560     while (chain) {
1561         if (chain->sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) {
1562             ddl_list = (VkDirectDriverLoadingListLUNARG *)chain;
1563             break;
1564         }
1565         chain = (const VkBaseOutStructure *)chain->pNext;
1566     }
1567     if (NULL == ddl_list) {
1568         if (direct_driver_loading_enabled) {
1569             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1570                        "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the "
1571                        "pNext chain of "
1572                        "VkInstanceCreateInfo did not contain the "
1573                        "VkDirectDriverLoadingListLUNARG structure.");
1574         }
1575         // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain
1576         return VK_SUCCESS;
1577     }
1578 
1579     if (!direct_driver_loading_enabled) {
1580         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1581                    "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the "
1582                    "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was "
1583                    "not enabled.");
1584         return VK_SUCCESS;
1585     }
1586     // If we are using exclusive mode, skip looking for any more drivers from system or environment variables
1587     if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) {
1588         *direct_driver_loading_exclusive_mode = true;
1589         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1590                    "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified "
1591                    "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment "
1592                    "variable driver search mechanisms.");
1593     }
1594     if (NULL == ddl_list->pDrivers) {
1595         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1596                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1597                    "VkInstanceCreateInfo has a NULL pDrivers member.");
1598         return VK_SUCCESS;
1599     }
1600     if (ddl_list->driverCount == 0) {
1601         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1602                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1603                    "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value "
1604                    "of zero.");
1605         return VK_SUCCESS;
1606     }
1607     // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver
1608     // Because icd_tramp's are prepended, this will result in the drivers appearing at the end
1609     for (uint32_t i = 0; i < ddl_list->driverCount; i++) {
1610         VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list);
1611         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1612             return res;
1613         }
1614     }
1615 
1616     return VK_SUCCESS;
1617 }
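// Illustrative application-side sketch (my_driver_gipa is a placeholder for a driver-provided
// vk_icdGetInstanceProcAddr-style entry point, not something defined in this file): to reach the code above, an
// application chains the structures into VkInstanceCreateInfo roughly like this:
//
//     VkDirectDriverLoadingInfoLUNARG driver_info = {
//         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG,
//         .pfnGetInstanceProcAddr = my_driver_gipa,
//     };
//     VkDirectDriverLoadingListLUNARG driver_list = {
//         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG,
//         .mode = VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG,  // or ..._INCLUSIVE_LUNARG to add to system drivers
//         .driverCount = 1,
//         .pDrivers = &driver_info,
//     };
//     const char *extensions[] = {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME};
//     VkInstanceCreateInfo create_info = {
//         .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
//         .pNext = &driver_list,
//         .enabledExtensionCount = 1,
//         .ppEnabledExtensionNames = extensions,
//     };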
1618 
1619 VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1620                                 const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) {
1621     loader_platform_dl_handle handle = NULL;
1622     PFN_vkCreateInstance fp_create_inst = NULL;
1623     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1624     PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL;
1625     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1626     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1627 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1628     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1629 #endif
1630     struct loader_scanned_icd *new_scanned_icd = NULL;
1631     uint32_t interface_vers;
1632     VkResult res = VK_SUCCESS;
1633 
1634     // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when
1635     // filename is NULL
1636     if (filename == NULL) {
1637         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: A NULL filename was used, skipping this ICD");
1639         res = VK_ERROR_INCOMPATIBLE_DRIVER;
1640         goto out;
1641     }
1642 
1643 // TODO implement smarter opening/closing of libraries. For now this
1644 // function leaves libraries open and the scanned_icd_clear closes them
1645 #if defined(__Fuchsia__)
1646     handle = loader_platform_open_driver(filename);
1647 #else
1648     handle = loader_platform_open_library(filename);
1649 #endif
1650     if (NULL == handle) {
1651         loader_handle_load_library_error(inst, filename, lib_status);
1652         if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
1653             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1654         } else {
1655             res = VK_ERROR_INCOMPATIBLE_DRIVER;
1656         }
1657         goto out;
1658     }
1659 
1660     // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion
1661     fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");
1662 
1663     // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver
1664     if (NULL == fp_negotiate_icd_version) {
1665         // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get
1666         // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function
1667         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1668 
1669         // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion
1670         if (fp_get_proc_addr) {
1671             fp_negotiate_icd_version =
1672                 (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1673         }
1674     }
1675 
1676     // Try to negotiate the Loader and Driver Interface Versions
1677     // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to.
1678     // If it *is* NULL, that means this driver uses interface version 0 or 1
1679     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
1680         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1681                    "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.",
1682                    filename);
1683         goto out;
1684     }
1685 
1686     // If we didn't already query vk_icdGetInstanceProcAddr, try now
1687     if (NULL == fp_get_proc_addr) {
1688         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1689     }
1690 
1691     // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly.
1692     if (NULL == fp_get_proc_addr) {
1693         // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's
1694         // requirements, as for Version 2 to be supported Version 1 must also be supported
1695         if (interface_vers != 0) {
1696             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1697                        "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export "
1698                        "vk_icdGetInstanceProcAddr, skip this ICD.",
1699                        filename, interface_vers);
1700             goto out;
1701         }
1702         // Use deprecated interface from version 0
1703         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
1704         if (NULL == fp_get_proc_addr) {
1705             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1706                        "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or "
1707                        "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
1708                        filename);
1709             goto out;
1710         } else {
1711             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1712                        "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of "
1713                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1714                        filename);
1715         }
1716         fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
1717         if (NULL == fp_create_inst) {
1718             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1719                        "loader_scanned_icd_add:  Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename);
1720             goto out;
1721         }
1722         fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
1723         if (NULL == fp_get_inst_ext_props) {
1724             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1725                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary "
1726                        "for ICD %s",
1727                        filename);
1728             goto out;
1729         }
1730     } else {
1731         // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one
1732         // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is
1733         // fine
1734         if (interface_vers == 0) {
1735             interface_vers = 1;
1736         }
1737 
1738         fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
1739         if (NULL == fp_create_inst) {
1740             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1741                        "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s",
1742                        filename);
1743             goto out;
1744         }
1745         fp_get_inst_ext_props =
1746             (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
1747         if (NULL == fp_get_inst_ext_props) {
1748             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1749                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via "
1750                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1751                        filename);
1752             goto out;
1753         }
1754         // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or
1755         // greater, otherwise fallback to loading it from the platform dynamic linker
1756         if (interface_vers >= 7) {
1757             fp_get_phys_dev_proc_addr =
1758                 (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1759         }
1760         if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) {
1761             fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
1762         }
1763 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1764         // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
1765         // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
1766         if (interface_vers >= 7) {
1767             fp_enum_dxgi_adapter_phys_devs =
1768                 (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1769         }
1770         if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) {
1771             fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
1772         }
1773 #endif
1774     }
1775 
1776     // check for enough capacity
1777     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1778         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1779                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1780         if (NULL == new_ptr) {
1781             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1782             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s",
1783                        filename);
1784             goto out;
1785         }
1786         icd_tramp_list->scanned_list = new_ptr;
1787 
1788         // double capacity
1789         icd_tramp_list->capacity *= 2;
1790     }
1791 
1792     loader_api_version api_version_struct = loader_make_version(api_version);
1793     if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) {
1794         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1795                    "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u."
1796                    " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)",
1797                    filename, api_version_struct.major, api_version_struct.minor, interface_vers);
1798     }
1799 
1800     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1801     new_scanned_icd->handle = handle;
1802     new_scanned_icd->api_version = api_version;
1803     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1804     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1805     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1806     new_scanned_icd->CreateInstance = fp_create_inst;
1807 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1808     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1809 #endif
1810     new_scanned_icd->interface_version = interface_vers;
1811 
1812     res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name);
1813     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
1814         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
1815         goto out;
1816     }
1817     icd_tramp_list->count++;
1818 
1819 out:
1820 
1821     return res;
1822 }
1823 
1824 void loader_initialize(void) {
1825     // initialize mutexes
1826     loader_platform_thread_create_mutex(&loader_lock);
1827     loader_platform_thread_create_mutex(&loader_preload_icd_lock);
1828     loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
1829     init_global_loader_settings();
1830 
1831     // initialize logging
1832     loader_init_global_debug_level();
1833 #if defined(_WIN32)
1834     windows_initialization();
1835 #endif
1836 
1837     loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
1838     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);
1839 
1840 #if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
1841     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
1842 #endif
1843 
1844     char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL);
1845     if (loader_disable_dynamic_library_unloading_env_var &&
1846         0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) {
1847         loader_disable_dynamic_library_unloading = true;
1848         loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled");
1849     } else {
1850         loader_disable_dynamic_library_unloading = false;
1851     }
1852     loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL);
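    // For example, running an application with VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING=1 keeps driver and
    // layer libraries mapped for the lifetime of the process, which can help tools that need to symbolize
    // addresses in those libraries after the loader would normally have closed them.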
1853 #if defined(LOADER_USE_UNSAFE_FILE_SEARCH)
1854     loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled");
1855 #endif
1856 }
1857 
1858 void loader_release() {
1859     // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
1860     loader_unload_preloaded_icds();
1861 
1862     // release mutexes
1863     teardown_global_loader_settings();
1864     loader_platform_thread_delete_mutex(&loader_lock);
1865     loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
1866     loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
1867 }
1868 
1869 // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
1870 void loader_preload_icds(void) {
1871     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1872 
1873     // Already preloaded, skip loading again.
1874     if (scanned_icds.scanned_list != NULL) {
1875         loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1876         return;
1877     }
1878 
1879     VkResult result = loader_icd_scan(NULL, &scanned_icds, NULL, NULL);
1880     if (result != VK_SUCCESS) {
1881         loader_scanned_icd_clear(NULL, &scanned_icds);
1882     }
1883     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1884 }
1885 
1886 // Release the ICD libraries that were preloaded
1887 void loader_unload_preloaded_icds(void) {
1888     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1889     loader_scanned_icd_clear(NULL, &scanned_icds);
1890     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1891 }
1892 
1893 #if !defined(_WIN32)
1894 __attribute__((constructor)) void loader_init_library(void) { loader_initialize(); }
1895 
1896 __attribute__((destructor)) void loader_free_library(void) { loader_release(); }
1897 #endif
1898 
1899 // Get the next file or directory name from a PATH_SEPARATOR-delimited string list or registry key path
1900 //
1901 // \returns
1902 // A pointer to the first character of the next path in the list, or a pointer to the
1903 // terminating NUL character if there are no more entries.
1904 // Note: the input string is modified in some cases. PASS IN A COPY!
1905 char *loader_get_next_path(char *path) {
1906     uint32_t len;
1907     char *next;
1908 
1909     if (path == NULL) return NULL;
1910     next = strchr(path, PATH_SEPARATOR);
1911     if (next == NULL) {
1912         len = (uint32_t)strlen(path);
1913         next = path + len;
1914     } else {
1915         *next = '\0';
1916         next++;
1917     }
1918 
1919     return next;
1920 }
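// Worked example (assuming ':' is PATH_SEPARATOR on this platform): given a writable copy of "a:b:c", the first
// call NUL-terminates "a" in place and returns a pointer to "b:c"; calling it again on that pointer isolates "b"
// and returns a pointer to "c"; a final call finds no separator and returns a pointer to the terminating NUL, so
// a caller's check of *dir on the advanced pointer stops the iteration.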
1921 
1922 /* Processes a json manifest's library_path and the location of the json manifest to create the path of the library
1923  * The output is stored in out_fullpath by allocating a string - so it is the caller's responsibility to free it
1924  * The output is the combination of the base path of manifest_file_path concatenated with the library path
1925  * If library_path is an absolute path, we do not prepend the base path of manifest_file_path
1926  *
1927  * This function takes ownership of library_path - caller does not need to worry about freeing it.
1928  */
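// Worked example (paths are illustrative only): with manifest_file_path "/usr/share/vulkan/icd.d/example_icd.json"
// and library_path "../../../lib/libexample_driver.so", out_fullpath becomes
// "/usr/share/vulkan/icd.d/../../../lib/libexample_driver.so". An absolute library_path such as
// "/opt/example/libexample_driver.so" is handed back unchanged, and a bare name such as "libexample_driver.so"
// (no directory symbol at all) is also left untouched so the platform's normal library search applies.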
1929 VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path,
1930                                                      const char *manifest_file_path, char **out_fullpath) {
1931     assert(library_path && manifest_file_path && out_fullpath);
1932     if (loader_platform_is_path_absolute(library_path)) {
1933         *out_fullpath = library_path;
1934         return VK_SUCCESS;
1935     }
1936     VkResult res = VK_SUCCESS;
1937 
1938     size_t library_path_len = strlen(library_path);
1939     size_t manifest_file_path_str_len = strlen(manifest_file_path);
1940     bool library_path_contains_directory_symbol = false;
1941     for (size_t i = 0; i < library_path_len; i++) {
1942         if (library_path[i] == DIRECTORY_SYMBOL) {
1943             library_path_contains_directory_symbol = true;
1944             break;
1945         }
1946     }
1947     // Means that the library_path is neither absolute nor relative - thus we should not modify it at all
1948     if (!library_path_contains_directory_symbol) {
1949         *out_fullpath = library_path;
1950         return VK_SUCCESS;
1951     }
1952     // must include both a directory symbol and the null terminator
1953     size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1;
1954 
1955     *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1956     if (NULL == *out_fullpath) {
1957         res = VK_ERROR_OUT_OF_HOST_MEMORY;
1958         goto out;
1959     }
1960     size_t cur_loc_in_out_fullpath = 0;
1961     // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path
1962     size_t last_directory_symbol = 0;
1963     bool found_directory_symbol = false;
1964     for (size_t i = 0; i < manifest_file_path_str_len; i++) {
1965         if (manifest_file_path[i] == DIRECTORY_SYMBOL) {
1966             last_directory_symbol = i + 1;  // we want to include the symbol
1967             found_directory_symbol = true;
1968             // don't break because we want to find the last occurrence
1969         }
1970     }
1971     // Add manifest_file_path up to the last directory symbol
1972     if (found_directory_symbol) {
1973         loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol);
1974         cur_loc_in_out_fullpath += last_directory_symbol;
1975     }
1976     loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path,
1977                    library_path_len);
1978     cur_loc_in_out_fullpath += library_path_len + 1;
1979     (*out_fullpath)[cur_loc_in_out_fullpath] = '\0';
1980 
1981 out:
1982     loader_instance_heap_free(inst, library_path);
1983 
1984     return res;
1985 }
1986 
1987 // Given a filename (file) and a list of paths (in_dirs), try to find an existing
1988 // file in one of those paths.  If the filename is already a path, the given paths are not searched.
1989 //
1990 // @return - out_fullpath receives either the constructed full path or the original file string.
1991 void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) {
1992     if (!loader_platform_is_path(file) && *in_dirs) {
1993         size_t dirs_copy_len = strlen(in_dirs) + 1;
1994         char *dirs_copy = loader_stack_alloc(dirs_copy_len);
1995         loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len);
1996 
1997         // find if file exists after prepending paths in given list
1998         // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
1999         char *dir = dirs_copy;
2000         char *next_dir = loader_get_next_path(dir);
2001         while (*dir && next_dir) {
2002             int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file);
2003             // Only check for the file if the concatenation succeeded; the loop always advances
2004             // to the next path below, so a failed snprintf cannot spin this loop forever.
2005             if (path_concat_ret >= 0 &&
2006                 loader_platform_file_exists(out_fullpath)) {
2007                 return;
2008             }
2009             dir = next_dir;
2010             next_dir = loader_get_next_path(dir);
2011         }
2012     }
2013 
2014     (void)snprintf(out_fullpath, out_size, "%s", file);
2015 }
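// Worked example (illustrative values): loader_get_fullpath("libexample_layer.so", "/usr/lib:/usr/local/lib",
// sizeof(buf), buf) writes "/usr/lib/libexample_layer.so" if that file exists, otherwise tries
// "/usr/local/lib/libexample_layer.so", and finally falls back to copying "libexample_layer.so" verbatim;
// passing a file argument that is already a path skips the directory search entirely.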
2016 
2017 // Verify that all component layers in a meta-layer are valid.
2018 bool verify_meta_layer_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2019                                         struct loader_layer_list *instance_layers) {
2020     loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);
2021 
2022     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2023         struct loader_layer_properties *comp_prop =
2024             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2025         if (comp_prop == NULL) {
2026             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2027                        "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
2028                        "  Skipping this layer.",
2029                        prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer);
2030 
2031             return false;
2032         }
2033 
2034         // Check the version of each layer, they need to be at least MAJOR and MINOR
2035         loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
2036         if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
2037             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2038                        "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
2039                        "layer %d has API version %d.%d that is lower.  Skipping this layer.",
2040                        meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
2041                        comp_prop_version.minor);
2042 
2043             return false;
2044         }
2045 
2046         // Make sure the layer isn't using its own name
2047         if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) {
2048             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2049                        "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
2050                        "list at index %d.  Skipping this layer.",
2051                        prop->info.layerName, comp_layer);
2052 
2053             return false;
2054         }
2055         if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2056             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2057                        "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
2058                        prop->info.layerName, comp_prop->info.layerName);
2059 
2060             // Make sure if the layer is using a meta-layer in its component list that we also verify that.
2061             if (!verify_meta_layer_component_layers(inst, comp_prop, instance_layers)) {
2062                 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2063                            "Meta-layer %s component layer %s can not find all component layers."
2064                            "  Skipping this layer.",
2065                            prop->info.layerName, prop->component_layer_names.list[comp_layer]);
2066                 return false;
2067             }
2068         }
2069     }
2070     // Didn't exit early so that means it passed all checks
2071     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2072                "Meta-layer \"%s\": all %d component layers appear to be valid.", prop->info.layerName,
2073                prop->component_layer_names.count);
2074 
2075     // If layer logging is on, list the internals included in the meta-layer
2076     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2077         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "  [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]);
2078     }
2079     return true;
2080 }
2081 
2082 // Add any instance and device extensions from component layers to this layer
2083 // list, so that anyone querying extensions will only need to look at the meta-layer
2084 VkResult update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2085                                                              struct loader_layer_list *instance_layers) {
2086     VkResult res = VK_SUCCESS;
2087     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2088         struct loader_layer_properties *comp_prop =
2089             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2090 
2091         if (NULL != comp_prop->instance_extension_list.list) {
2092             for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
2093                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s",
2094                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2095                            comp_prop->instance_extension_list.list[ext].extensionName);
2096 
2097                 if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
2098                     res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1,
2099                                                  &comp_prop->instance_extension_list.list[ext]);
2100                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2101                         return res;
2102                     }
2103                 }
2104             }
2105         }
2106         if (NULL != comp_prop->device_extension_list.list) {
2107             for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
2108                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s",
2109                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2110                            comp_prop->device_extension_list.list[ext].props.extensionName);
2111 
2112                 if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
2113                     res = loader_add_to_dev_ext_list(inst, &prop->device_extension_list,
2114                                                      &comp_prop->device_extension_list.list[ext].props, NULL);
2115                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2116                         return res;
2117                     }
2118                 }
2119             }
2120         }
2121     }
2122     return res;
2123 }
2124 
2125 // Verify that all meta-layers in a layer list are valid.
2126 VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
2127                                 struct loader_layer_list *instance_layers, bool *override_layer_present) {
2128     VkResult res = VK_SUCCESS;
2129     *override_layer_present = false;
2130     for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
2131         struct loader_layer_properties *prop = &instance_layers->list[i];
2132 
2133         // If this is a meta-layer, make sure it is valid
2134         if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2135             if (verify_meta_layer_component_layers(inst, prop, instance_layers)) {
2136                 // If any meta layer is valid, update its extension list to include the extensions from its component layers.
2137                 res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers);
2138                 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2139                     return res;
2140                 }
2141                 if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) {
2142                     *override_layer_present = true;
2143                 }
2144             } else {
2145                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
2146                            "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
2147 
2148                 loader_remove_layer_in_list(inst, instance_layers, i);
2149                 i--;
2150             }
2151         }
2152     }
2153     return res;
2154 }
2155 
2156 // If the current working directory matches any app_key_path of the layers, remove all other override layers.
2157 // Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
2158 void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
2159     if (instance_layers == NULL) {
2160         return;
2161     }
2162 
2163     char cur_path[1024];
2164     char *ret = loader_platform_executable_path(cur_path, 1024);
2165     if (NULL == ret) {
2166         return;
2167     }
2168     // Find out if there is an override layer with the same app_key_path as the path to the current executable.
2169     // If more than one is found, keep the first and remove the duplicates.
2170     // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
2171     bool found_active_override_layer = false;
2172     int global_layer_index = -1;
2173     for (uint32_t i = 0; i < instance_layers->count; i++) {
2174         struct loader_layer_properties *props = &instance_layers->list[i];
2175         if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
2176             if (props->app_key_paths.count > 0) {  // not the global layer
2177                 for (uint32_t j = 0; j < props->app_key_paths.count; j++) {
2178                     if (strcmp(props->app_key_paths.list[j], cur_path) == 0) {
2179                         if (!found_active_override_layer) {
2180                             found_active_override_layer = true;
2181                         } else {
2182                             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2183                                        "remove_all_non_valid_override_layers: Multiple override layers with the same path in "
2184                                        "'app_keys' were found. Using the first layer found");
2186 
2187                             // Remove duplicate active override layers that have the same app_key_path
2188                             loader_remove_layer_in_list(inst, instance_layers, i);
2189                             i--;
2190                         }
2191                     }
2192                 }
2193                 if (!found_active_override_layer) {
2194                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2195                                "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);
2196 
2197                     // Remove non-global override layers that don't have an app_key that matches cur_path
2198                     loader_remove_layer_in_list(inst, instance_layers, i);
2199                     i--;
2200                 }
2201             } else {
2202                 if (global_layer_index == -1) {
2203                     global_layer_index = i;
2204                 } else {
2205                     loader_log(
2206                         inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2207                         "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
2208                         "layer found");
2209                     loader_remove_layer_in_list(inst, instance_layers, i);
2210                     i--;
2211                 }
2212             }
2213         }
2214     }
2215     // Remove the global layer if a layer with the same app_key_path as the path to the current executable is found
2216     if (found_active_override_layer && global_layer_index >= 0) {
2217         loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
2218     }
2219     // Should be at most 1 override layer in the list now.
2220     if (found_active_override_layer) {
2221         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
2222     } else if (global_layer_index >= 0) {
2223         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
2224     }
2225 }
2226 
2227 /* The following are required in the "layer" object:
2228  * "name"
2229  * "type"
2230  * (for non-meta layers) "library_path"
2231  * (for meta layers) "component_layers"
2232  * "api_version"
2233  * "implementation_version"
2234  * "description"
2235  * (for implicit layers) "disable_environment"
2236  */
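/*
 * For illustration only (a sketch; the layer name, library path, and versions below are
 * placeholders, not taken from any shipped layer): a minimal explicit-layer manifest that
 * satisfies the required fields listed above might look like
 *
 *   {
 *       "file_format_version": "1.2.0",
 *       "layer": {
 *           "name": "VK_LAYER_EXAMPLE_sample",
 *           "type": "INSTANCE",
 *           "library_path": "./libVkLayer_example.so",
 *           "api_version": "1.3.250",
 *           "implementation_version": "1",
 *           "description": "Hypothetical sample layer"
 *       }
 *   }
 *
 * A meta-layer would replace "library_path" with a "component_layers" array, and an implicit
 * layer would additionally supply a "disable_environment" object.
 */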
2237 
2238 VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
2239                                 cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) {
2240     assert(layer_instance_list);
2241     char *type = NULL;
2242     char *api_version = NULL;
2243     char *implementation_version = NULL;
2244     VkResult result = VK_SUCCESS;
2245     struct loader_layer_properties props = {0};
2246 
2247     // Parse name
2248 
2249     result = loader_parse_json_string_to_existing_str(inst, layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName);
2250     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2251     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2252         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2253                    "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer",
2254                    filename);
2255         goto out;
2256     }
2257 
2258     // Check if this layer's name matches the override layer name, set is_override to true if so.
2259     if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) {
2260         props.is_override = true;
2261     }
2262 
2263     if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) {
2264         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)",
2265                    props.info.layerName);
2266     }
2267 
2268     // Parse type
2269 
2270     result = loader_parse_json_string(layer_node, "type", &type);
2271     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2272     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2273         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2274                    "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer",
2275                    filename);
2276         goto out;
2277     }
2278 
2279     // Add list entry
2280     if (!strcmp(type, "DEVICE")) {
2281         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping this layer");
2282         result = VK_ERROR_INITIALIZATION_FAILED;
2283         goto out;
2284     }
2285 
2286     // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders
2287     if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
2288         props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
2289         if (!is_implicit) {
2290             props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
2291         }
2292     } else {
2293         result = VK_ERROR_INITIALIZATION_FAILED;
2294         goto out;
2295     }
2296 
2297     // Parse api_version
2298 
2299     result = loader_parse_json_string(layer_node, "api_version", &api_version);
2300     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2301     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2302         loader_log(
2303             inst, VULKAN_LOADER_WARN_BIT, 0,
2304             "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer",
2305             filename);
2306         goto out;
2307     }
2308 
2309     props.info.specVersion = loader_parse_version_string(api_version);
2310 
2311     // Make sure the layer's manifest doesn't contain a non-zero variant value
2312     if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) {
2313         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2314                    "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. "
2315                    " Skipping Layer.",
2316                    props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion));
2317         result = VK_ERROR_INITIALIZATION_FAILED;
2318         goto out;
2319     }
2320 
2321     // Parse implementation_version
2322 
2323     result = loader_parse_json_string(layer_node, "implementation_version", &implementation_version);
2324     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2325     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2326         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2327                    "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, "
2328                    "skipping this layer",
2329                    filename);
2330         goto out;
2331     }
2332     props.info.implementationVersion = atoi(implementation_version);
2333 
2334     // Parse description
2335 
2336     result = loader_parse_json_string_to_existing_str(inst, layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE,
2337                                                       props.info.description);
2338     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2339     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2340         loader_log(
2341             inst, VULKAN_LOADER_WARN_BIT, 0,
2342             "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer",
2343             filename);
2344         goto out;
2345     }
2346 
2347     // Parse library_path
2348 
2349     // library_path is only required when component_layers is not defined
2350     cJSON *library_path = loader_cJSON_GetObjectItem(layer_node, "library_path");
2351 
2352     if (NULL != library_path) {
2353         if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) {
2354             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2355                        "Indicating meta-layer-specific component_layers, but also defining layer library path.  Both are not "
2356                        "compatible, so skipping this layer");
2357             result = VK_ERROR_INITIALIZATION_FAILED;
2358             goto out;
2359         }
2360 
2361         result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name);
2362         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2363 
2364         char *library_path_str = loader_cJSON_Print(library_path);
2365         if (NULL == library_path_str) {
2366             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2367                        "Skipping layer due to problem accessing the library_path value in manifest JSON file %s", filename);
2368             result = VK_ERROR_OUT_OF_HOST_MEMORY;
2369             goto out;
2370         }
2371 
2372         // This function takes ownership of library_path_str - so we don't need to clean it up
2373         result = combine_manifest_directory_and_library_path(inst, library_path_str, filename, &props.lib_name);
2374         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2375     }
2376 
2377     // Parse component_layers
2378 
2379     if (NULL == library_path) {
2380         if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) {
2381             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2382                        "Indicating meta-layer-specific component_layers, but using older JSON file version.");
2383         }
2384 
2385         result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names));
2386         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2387             goto out;
2388         }
2389         if (VK_ERROR_INITIALIZATION_FAILED == result) {
2390             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2391                        "Layer missing both library_path and component_layers fields.  One or the other MUST be defined.  Skipping "
2392                        "this layer");
2393             goto out;
2394         }
2395         // This is now, officially, a meta-layer
2396         props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
2397         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"",
2398                    props.info.layerName);
2399     }
2400 
2401     // Parse blacklisted_layers
2402 
2403     if (props.is_override) {
2404         result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names));
2405         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2406             goto out;
2407         }
2408     }
2409 
2410     // Parse override_paths
2411 
2412     result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths));
2413     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2414         goto out;
2415     }
2416     if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2417         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2418                    "Indicating meta-layer-specific override paths, but using older JSON file version.");
2419     }
2420 
2421     // Parse disable_environment
2422 
2423     if (is_implicit) {
2424         cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment");
2425         if (disable_environment == NULL) {
2426             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2427                        "Didn't find required layer object disable_environment in manifest JSON file, skipping this layer");
2428             result = VK_ERROR_INITIALIZATION_FAILED;
2429             goto out;
2430         }
2431 
2432         if (!disable_environment->child || disable_environment->child->type != cJSON_String) {
2433             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2434                        "Didn't find required layer child value disable_environment in manifest JSON file, skipping this layer "
2435                        "(Policy #LLP_LAYER_9)");
2436             result = VK_ERROR_INITIALIZATION_FAILED;
2437             goto out;
2438         }
2439         result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name));
2440         if (VK_SUCCESS != result) goto out;
2441         result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value));
2442         if (VK_SUCCESS != result) goto out;
2443     }
2444 
2445     // Now get all optional items and objects and put in list:
2446     // functions
2447     // instance_extensions
2448     // device_extensions
2449     // enable_environment (implicit layers only)
2450     // library_arch
2451 
2452     // Layer interface functions
2453     //    vkGetInstanceProcAddr
2454     //    vkGetDeviceProcAddr
2455     //    vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
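    // Illustrative sketch only (the field names match what is parsed below; the exported symbol
    // names on the right are placeholders): a manifest's "functions" object might look like
    //   "functions": {
    //       "vkNegotiateLoaderLayerInterfaceVersion": "ExampleNegotiateLoaderLayerInterfaceVersion",
    //       "vkGetInstanceProcAddr": "ExampleGetInstanceProcAddr",
    //       "vkGetDeviceProcAddr": "ExampleGetDeviceProcAddr"
    //   }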
2456     cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions");
2457     if (functions != NULL) {
2458         if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2459             result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion",
2460                                               &props.functions.str_negotiate_interface);
2461             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2462         }
2463         result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa);
2464         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2465 
2466         if (props.functions.str_gipa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2467             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2468                        "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
2469                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2470                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2471                        props.info.layerName);
2472         }
2473 
2474         result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa);
2475         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2476 
2477         if (props.functions.str_gdpa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2478             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2479                        "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
2480                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2481                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2482                        props.info.layerName);
2483         }
2484     }
2485 
2486     // instance_extensions
2487     //   array of {
2488     //     name
2489     //     spec_version
2490     //   }
2491 
2492     cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions");
2493     if (instance_extensions != NULL) {
2494         int count = loader_cJSON_GetArraySize(instance_extensions);
2495         for (int i = 0; i < count; i++) {
2496             VkExtensionProperties ext_prop = {0};
2497             cJSON *ext_item = loader_cJSON_GetArrayItem(instance_extensions, i);
2498             result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2499                                                               ext_prop.extensionName);
2500             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2501             if (result == VK_ERROR_INITIALIZATION_FAILED) continue;
2502             char *spec_version = NULL;
2503             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2504             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2505             if (NULL != spec_version) {
2506                 ext_prop.specVersion = atoi(spec_version);
2507             }
2508             loader_instance_heap_free(inst, spec_version);
2509             bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
2510             if (!ext_unsupported) {
2511                 loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop);
2512             }
2513         }
2514     }
2515 
2516     // device_extensions
2517     //   array of {
2518     //     name
2519     //     spec_version
2520     //     entrypoints
2521     //   }
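    // Illustrative sketch only (the extension and entrypoint names are placeholders): one entry of
    // the "device_extensions" array parsed below might look like
    //   {
    //       "name": "VK_EXT_example_extension",
    //       "spec_version": "1",
    //       "entrypoints": ["vkExampleEntrypointEXT"]
    //   }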
2522     cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions");
2523     if (device_extensions != NULL) {
2524         int count = loader_cJSON_GetArraySize(device_extensions);
2525         for (int i = 0; i < count; i++) {
2526             VkExtensionProperties ext_prop = {0};
2527 
2528             cJSON *ext_item = loader_cJSON_GetArrayItem(device_extensions, i);
2529 
2530             result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2531                                                               ext_prop.extensionName);
2532             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2533 
2534             char *spec_version = NULL;
2535             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2536             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2537             if (NULL != spec_version) {
2538                 ext_prop.specVersion = atoi(spec_version);
2539             }
2540             loader_instance_heap_free(inst, spec_version);
2541 
2542             cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints");
2543             if (entrypoints == NULL) {
2544                 result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL);
2545                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2546                 continue;
2547             }
2548 
2549             struct loader_string_list entrys = {0};
2550             result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys);
2551             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2552             result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys);
2553             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2554         }
2555     }
2556     if (is_implicit) {
2557         cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment");
2558 
2559         // enable_environment is optional
2560         if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String) {
2561             result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name));
2562             if (VK_SUCCESS != result) goto out;
2563             result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value));
2564             if (VK_SUCCESS != result) goto out;
2565         }
2566     }
2567 
2568     // Read in the pre-instance stuff
2569     cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions");
2570     if (NULL != pre_instance) {
2571         // The pre_instance_functions section requires manifest version 1.1.2 or newer
2572         if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
2573             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2574                        "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version "
2575                        "1.1.2 or later. The section will be ignored",
2576                        filename);
2577         } else if (!is_implicit) {
2578             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2579                        "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit "
2580                        "layers. The section will be ignored",
2581                        filename);
2582         } else {
2583             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties",
2584                                               &props.pre_instance_functions.enumerate_instance_extension_properties);
2585             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2586 
2587             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties",
2588                                               &props.pre_instance_functions.enumerate_instance_layer_properties);
2589             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2590 
2591             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion",
2592                                               &props.pre_instance_functions.enumerate_instance_version);
2593             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2594         }
2595     }
2596 
2597     if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) {
2598         if (!props.is_override) {
2599             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2600                        "Layer %s contains app_keys, but app_keys can only be provided by the override meta-layer. "
2601                        "They will be ignored.",
2602                        props.info.layerName);
2603         }
2604 
2605         result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths);
2606         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2607     }
2608 
2609     char *library_arch = NULL;
2610     result = loader_parse_json_string(layer_node, "library_arch", &library_arch);
2611     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2612     if (library_arch != NULL) {
2613         if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) ||
2614             (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) {
2615             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2616                        "Layer library architecture doesn't match the current running architecture, skipping this layer");
2617             loader_instance_heap_free(inst, library_arch);
2618             result = VK_ERROR_INITIALIZATION_FAILED;
2619             goto out;
2620         }
2621         loader_instance_heap_free(inst, library_arch);
2622     }
2623 
2624     result = VK_SUCCESS;
2625 
2626 out:
2627     // Try to append the layer property
2628     if (VK_SUCCESS == result) {
2629         result = loader_append_layer_property(inst, layer_instance_list, &props);
2630     }
2631     // If appending fails - free all the memory allocated in it
2632     if (VK_SUCCESS != result) {
2633         loader_free_layer_properties(inst, &props);
2634     }
2635     loader_instance_heap_free(inst, type);
2636     loader_instance_heap_free(inst, api_version);
2637     loader_instance_heap_free(inst, implementation_version);
2638     return result;
2639 }
2640 
2641 bool is_valid_layer_json_version(const loader_api_version *layer_json) {
2642     // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1.
2643     if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) ||
2644         (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
2645         (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
2646         return true;
2647     }
2648     return false;
2649 }
2650 
2651 // Given a cJSON struct (json) of the top level JSON object from layer manifest
2652 // file, add entry to the layer_list. Fill out the layer_properties in this list
2653 // entry from the input cJSON object.
2654 //
2655 // \returns
2656 // VkResult
2657 // On success, layer_list has a new entry initialized from the cJSON object.
2658 // If the json input object does not have all the required fields no entry
2659 // is added to the list.
2660 VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json,
2661                                      bool is_implicit, char *filename) {
2662     // The following fields in the layer manifest file are required:
2663     //   - "file_format_version"
2664     //   - If more than one "layer" object is used, then the "layers" array is
2665     //     required
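    // As a hedged illustration (the values are placeholders), a manifest that uses the "layers"
    // array form has a top level shaped like
    //   {
    //       "file_format_version": "1.2.0",
    //       "layers": [ { ...first layer object... }, { ...second layer object... } ]
    //   }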
2666     VkResult result = VK_ERROR_INITIALIZATION_FAILED;
2667     cJSON *item, *layers_node, *layer_node;
2668     loader_api_version json_version = {0, 0, 0};
2669     char *file_vers = NULL;
2670     // Make sure the top-level JSON value is an object
2671     if (!json || json->type != 6) {  // 6 == cJSON_Object in the bundled cJSON
2672         goto out;
2673     }
2674     item = loader_cJSON_GetObjectItem(json, "file_format_version");
2675     if (item == NULL) {
2676         goto out;
2677     }
2678     file_vers = loader_cJSON_PrintUnformatted(item);
2679     if (NULL == file_vers) {
2680         result = VK_ERROR_OUT_OF_HOST_MEMORY;
2681         goto out;
2682     }
2683     loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
2684     // Get the major, minor, and patch fields as integers for easier comparison
2685     json_version = loader_make_full_version(loader_parse_version_string(file_vers));
2686 
2687     if (!is_valid_layer_json_version(&json_version)) {
2688         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2689                    "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d.  May cause errors.", filename,
2690                    json_version.major, json_version.minor, json_version.patch);
2691     }
2692 
2693     // If "layers" is present, read in the array of layer objects
2694     layers_node = loader_cJSON_GetObjectItem(json, "layers");
2695     if (layers_node != NULL) {
2696         int numItems = loader_cJSON_GetArraySize(layers_node);
2697         // The 'layers' array requires manifest file version 1.0.1 or newer
2698         if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2699             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2700                        "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
2701                        "version %s",
2702                        filename, file_vers);
2703         }
2704         for (int curLayer = 0; curLayer < numItems; curLayer++) {
2705             layer_node = loader_cJSON_GetArrayItem(layers_node, curLayer);
2706             if (layer_node == NULL) {
2707                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2708                            "loader_add_layer_properties: Can not find 'layers' array element %d object in manifest JSON file %s.  "
2709                            "Skipping this file",
2710                            curLayer, filename);
2711                 goto out;
2712             }
2713             result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2714         }
2715     } else {
2716         // Otherwise, try to read in individual layers
2717         layer_node = loader_cJSON_GetObjectItem(json, "layer");
2718         if (layer_node == NULL) {
2719             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2720                        "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s.  Skipping this file.",
2721                        filename);
2722             goto out;
2723         }
2724         // Loop through all "layer" objects in the file to get a count of them
2725         // first.
2726         uint16_t layer_count = 0;
2727         cJSON *tempNode = layer_node;
2728         do {
2729             tempNode = tempNode->next;
2730             layer_count++;
2731         } while (tempNode != NULL);
2732 
2733         // Throw a warning if we encounter multiple "layer" objects in file
2734         // versions newer than 1.0.0.  Having multiple objects with the same
2735         // name at the same level is actually a JSON standard violation.
2736         if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2737             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2738                        "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\".  "
2739                        "Please use 'layers' : [] array instead in %s.",
2740                        filename);
2741         } else {
2742             do {
2743                 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2744                 layer_node = layer_node->next;
2745             } while (layer_node != NULL);
2746         }
2747     }
2748 
2749 out:
2750     loader_instance_heap_free(inst, file_vers);
2751 
2752     return result;
2753 }
2754 
2755 size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) {
2756     size_t path_size = 0;
2757 
2758     if (NULL != cur_path) {
2759         // For each folder in cur_path (detected by finding additional
2760         // path separators in the string), we need to add the relative path on
2761         // the end.  Plus, leave an additional two slots on the end to add an
2762         // additional directory slash and path separator if needed
2763         path_size += strlen(cur_path) + relative_path_size + 2;
2764         for (const char *x = cur_path; *x; ++x) {
2765             if (*x == PATH_SEPARATOR) {
2766                 path_size += relative_path_size + 2;
2767             }
2768         }
2769     }
2770 
2771     return path_size;
2772 }
2773 
2774 void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) {
2775     if (NULL != cur_path) {
2776         uint32_t start = 0;
2777         uint32_t stop = 0;
2778         char *cur_write = *output_path;
2779 
2780         while (cur_path[start] != '\0') {
2781             while (cur_path[start] == PATH_SEPARATOR) {
2782                 start++;
2783             }
2784             stop = start;
2785             while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
2786                 stop++;
2787             }
2788             const size_t s = stop - start;
2789             if (s) {
2790                 memcpy(cur_write, &cur_path[start], s);
2791                 cur_write += s;
2792 
2793                 // If this is a specific JSON file, just add it and don't add any
2794                 // relative path or directory symbol to it.
2795                 if (!is_json(cur_write - 5, s)) {
2796                     // Add the relative directory if present.
2797                     if (relative_path_size > 0) {
2798                         // If last symbol written was not a directory symbol, add it.
2799                         if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
2800                             *cur_write++ = DIRECTORY_SYMBOL;
2801                         }
2802                         memcpy(cur_write, relative_path, relative_path_size);
2803                         cur_write += relative_path_size;
2804                     }
2805                 }
2806 
2807                 *cur_write++ = PATH_SEPARATOR;
2808                 start = stop;
2809             }
2810         }
2811         *output_path = cur_write;
2812     }
2813 }
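/*
 * Worked example (a sketch assuming a Unix-style PATH_SEPARATOR of ':' and DIRECTORY_SYMBOL of '/';
 * the directories are placeholders): given cur_path = "/etc/xdg:/etc" and
 * relative_path = "vulkan/icd.d", determine_data_file_path_size() reserves room for each folder in
 * cur_path plus the relative path and separators, and copy_data_file_info() then writes
 *
 *     "/etc/xdg/vulkan/icd.d:/etc/vulkan/icd.d:"
 *
 * into the output buffer.  Entries that already name a specific ".json" file are copied through
 * unchanged, without appending the relative directory.
 */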
2814 
2815 // If the file found is a manifest file name, add it to the out_files manifest list.
2816 VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) {
2817     VkResult vk_result = VK_SUCCESS;
2818 
2819     assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name");
2820     assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files");
2821 
2822     // Look for files ending with ".json" suffix
2823     size_t name_len = strlen(file_name);
2824     const char *name_suffix = file_name + name_len - 5;
2825     if (!is_json(name_suffix, name_len)) {
2826         // Use incomplete to indicate invalid name, but to keep going.
2827         vk_result = VK_INCOMPLETE;
2828         goto out;
2829     }
2830 
2831     vk_result = copy_str_to_string_list(inst, out_files, file_name, name_len);
2832 
2833 out:
2834 
2835     return vk_result;
2836 }
2837 
2838 // Add any files found in the search_path.  If any path in the search path points to a specific JSON, attempt to
2839 // only open that one JSON.  Otherwise, if the path is a folder, search the folder for JSON files.
2840 VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files,
2841                         bool use_first_found_manifest) {
2842     VkResult vk_result = VK_SUCCESS;
2843     DIR *dir_stream = NULL;
2844     struct dirent *dir_entry;
2845     char *cur_file;
2846     char *next_file;
2847     char *name;
2848     char full_path[2048];
2849 #if !defined(_WIN32)
2850     char temp_path[2048];
2851 #endif
2852 
2853     // Now, parse the paths
2854     next_file = search_path;
2855     while (NULL != next_file && *next_file != '\0') {
2856         name = NULL;
2857         cur_file = next_file;
2858         next_file = loader_get_next_path(cur_file);
2859 
2860         // If this is a JSON file, try to open it.
2861         size_t len = strlen(cur_file);
2862         if (is_json(cur_file + len - 5, len)) {
2863 #if defined(_WIN32)
2864             name = cur_file;
2865 #elif COMMON_UNIX_PLATFORMS
2866             // Only Linux has relative paths; make a copy of the location so it isn't modified
2867             size_t str_len;
2868             if (NULL != next_file) {
2869                 str_len = next_file - cur_file + 1;
2870             } else {
2871                 str_len = strlen(cur_file) + 1;
2872             }
2873             if (str_len > sizeof(temp_path)) {
2874                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file);
2875                 continue;
2876             }
2877             strncpy(temp_path, cur_file, str_len);
2878             name = temp_path;
2879 #else
2880 #warning add_data_files must define relative path copy for this platform
2881 #endif
2882             loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
2883             name = full_path;
2884 
2885             VkResult local_res;
2886             local_res = add_if_manifest_file(inst, name, out_files);
2887 
2888             // Incomplete means this was not a valid data file.
2889             if (local_res == VK_INCOMPLETE) {
2890                 continue;
2891             } else if (local_res != VK_SUCCESS) {
2892                 vk_result = local_res;
2893                 break;
2894             }
2895         } else {  // Otherwise, treat it as a directory
2896             dir_stream = loader_opendir(inst, cur_file);
2897             if (NULL == dir_stream) {
2898                 continue;
2899             }
2900             while (1) {
2901                 dir_entry = readdir(dir_stream);
2902                 if (NULL == dir_entry) {
2903                     break;
2904                 }
2905 
2906                 name = &(dir_entry->d_name[0]);
2907                 loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
2908                 name = full_path;
2909 
2910                 VkResult local_res;
2911                 local_res = add_if_manifest_file(inst, name, out_files);
2912 
2913                 // Incomplete means this was not a valid data file.
2914                 if (local_res == VK_INCOMPLETE) {
2915                     continue;
2916                 } else if (local_res != VK_SUCCESS) {
2917                     vk_result = local_res;
2918                     break;
2919                 }
2920             }
2921             loader_closedir(inst, dir_stream);
2922             if (vk_result != VK_SUCCESS) {
2923                 goto out;
2924             }
2925         }
2926         if (use_first_found_manifest && out_files->count > 0) {
2927             break;
2928         }
2929     }
2930 
2931 out:
2932 
2933     return vk_result;
2934 }
2935 
2936 // Look for data files in the provided paths, but first check the environment override to determine if we should use that
2937 // instead.
2938 VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
2939                                          const char *path_override, bool *override_active, struct loader_string_list *out_files) {
2940     VkResult vk_result = VK_SUCCESS;
2941     char *override_env = NULL;
2942     const char *override_path = NULL;
2943     char *additional_env = NULL;
2944     size_t search_path_size = 0;
2945     char *search_path = NULL;
2946     char *cur_path_ptr = NULL;
2947     bool use_first_found_manifest = false;
2948 #if COMMON_UNIX_PLATFORMS
2949     char *relative_location = NULL;  // Only used on Unix platforms
2950     size_t rel_size = 0;             // not declared on Windows, so no unused-variable compiler warnings are generated
2951 #endif
2952 
2953 #if defined(_WIN32)
2954     char *package_path = NULL;
2955 #elif COMMON_UNIX_PLATFORMS
2956     // Determine how much space is needed to generate the full search path
2957     // for the current manifest files.
2958     char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst);
2959     char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
2960 
2961 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2962     if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) {
2963         xdg_config_dirs = FALLBACK_CONFIG_DIRS;
2964     }
2965 #endif
2966 
2967     char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst);
2968     char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
2969 
2970 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2971     if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) {
2972         xdg_data_dirs = FALLBACK_DATA_DIRS;
2973     }
2974 #endif
2975 
2976     char *home = NULL;
2977     char *default_data_home = NULL;
2978     char *default_config_home = NULL;
2979     char *home_data_dir = NULL;
2980     char *home_config_dir = NULL;
2981 
2982     // Only fall back to HOME when XDG_CONFIG_HOME or XDG_DATA_HOME is not set
2983     home = loader_secure_getenv("HOME", inst);
2984     if (home != NULL) {
2985         if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) {
2986             const char config_suffix[] = "/.config";
2987             size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1;
2988             default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2989             if (default_config_home == NULL) {
2990                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2991                 goto out;
2992             }
2993             strncpy(default_config_home, home, default_config_home_len);
2994             strncat(default_config_home, config_suffix, default_config_home_len);
2995         }
2996         if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) {
2997             const char data_suffix[] = "/.local/share";
2998             size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1;
2999             default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3000             if (default_data_home == NULL) {
3001                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3002                 goto out;
3003             }
3004             strncpy(default_data_home, home, default_data_home_len);
3005             strncat(default_data_home, data_suffix, default_data_home_len);
3006         }
3007     }
3008 
3009     if (NULL != default_config_home) {
3010         home_config_dir = default_config_home;
3011     } else {
3012         home_config_dir = xdg_config_home;
3013     }
3014     if (NULL != default_data_home) {
3015         home_data_dir = default_data_home;
3016     } else {
3017         home_data_dir = xdg_data_home;
3018     }
3019 
3020 #if defined(__OHOS__)
3021     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
3022     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
3023     char *debug_layer_json_path = NULL;
3024 
3025     bool currentProcessEnableDebugLayer = false;
3026     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
3027         currentProcessEnableDebugLayer = true;
3028         debug_layer_json_path = loader_secure_getenv("debug.graphic.vklayer_json_path", inst);
3029         if (NULL == debug_layer_json_path || '\0' == debug_layer_json_path[0]) {
3030             const char default_json_path[] = "/data/storage/el2/base/haps/entry/files/";
3031             const char json_suffix[] = ".json";
3032             size_t debug_layer_json_path_len = strlen(default_json_path) + strlen(debug_layer_name) + sizeof(json_suffix) + 1;
3033             debug_layer_json_path = loader_instance_heap_calloc(inst, debug_layer_json_path_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3034             if (debug_layer_json_path == NULL) {
3035                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3036                 goto out;
3037             }
3038             strncpy(debug_layer_json_path, default_json_path, debug_layer_json_path_len);
3039             strncat(debug_layer_json_path, debug_layer_name, debug_layer_json_path_len);
3040             strncat(debug_layer_json_path, json_suffix, debug_layer_json_path_len);
3041         }
3042         loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "OHOS:: debug_layer_json_path: %s", debug_layer_json_path);
3043     }
3044 #endif
3045 
3046 #else
3047 #warning read_data_files_in_search_paths unsupported platform
3048 #endif
3049 
3050     switch (manifest_type) {
3051         case LOADER_DATA_FILE_MANIFEST_DRIVER:
3052             override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst);
3053             if (NULL == override_env) {
3054                 // Not there, so fall back to the old name
3055                 override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst);
3056             }
3057             additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst);
3058 #if COMMON_UNIX_PLATFORMS
3059             relative_location = VK_DRIVERS_INFO_RELATIVE_DIR;
3060 #endif
3061 #if defined(_WIN32)
3062             package_path = windows_get_app_package_manifest_path(inst);
3063 #endif
3064             break;
3065         case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3066 #if COMMON_UNIX_PLATFORMS
3067             relative_location = VK_ILAYERS_INFO_RELATIVE_DIR;
3068 #endif
3069             break;
3070         case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3071             override_env = loader_secure_getenv(VK_LAYER_PATH_ENV_VAR, inst);
3072             additional_env = loader_secure_getenv(VK_ADDITIONAL_LAYER_PATH_ENV_VAR, inst);
3073 #if COMMON_UNIX_PLATFORMS
3074             relative_location = VK_ELAYERS_INFO_RELATIVE_DIR;
3075 #endif
3076             break;
3077         default:
3078             assert(false && "Shouldn't get here!");
3079             break;
3080     }
3081 
3082     // Log a message when VK_LAYER_PATH is set but the override layer paths take priority
3083     if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) {
3084         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3085                    "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. "
3086                    "VK_LAYER_PATH is set to %s",
3087                    override_env);
3088     }
3089 
3090     if (path_override != NULL) {
3091         override_path = path_override;
3092     } else if (override_env != NULL) {
3093         override_path = override_env;
3094     }
3095 
3096     // Add two by default for NULL terminator and one path separator on end (just in case)
3097     search_path_size = 2;
3098 
3099     // If there's an override, use that (and the local folder if required) and nothing else
3100     if (NULL != override_path) {
3101         // Local folder and null terminator
3102         search_path_size += strlen(override_path) + 2;
3103     } else {
3104         // Add the size of any additional search paths defined in the additive environment variable
3105         if (NULL != additional_env) {
3106             search_path_size += determine_data_file_path_size(additional_env, 0) + 2;
3107 #if defined(_WIN32)
3108         }
3109         if (NULL != package_path) {
3110             search_path_size += determine_data_file_path_size(package_path, 0) + 2;
3111         }
3112         if (search_path_size == 2) {
3113             goto out;
3114         }
3115 #elif COMMON_UNIX_PLATFORMS
3116         }
3117 
3118         // Add the general search folders (with the appropriate relative folder added)
3119         rel_size = strlen(relative_location);
3120         if (rel_size > 0) {
3121 #if defined(__APPLE__)
3122             search_path_size += MAXPATHLEN;
3123 #endif
3124             // Only add the home folders if defined
3125             if (NULL != home_config_dir) {
3126                 search_path_size += determine_data_file_path_size(home_config_dir, rel_size);
3127             }
3128             search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size);
3129             search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size);
3130 #if defined(EXTRASYSCONFDIR)
3131             search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size);
3132 #endif
3133             // Only add the home folders if defined
3134             if (NULL != home_data_dir) {
3135                 search_path_size += determine_data_file_path_size(home_data_dir, rel_size);
3136             }
3137             search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size);
3138 #if defined(__OHOS__)
3139             if (currentProcessEnableDebugLayer) {
3140                 search_path_size += determine_data_file_path_size(debug_layer_json_path, rel_size);
3141             }
3142             search_path_size += determine_data_file_path_size("/system/etc/vulkan/swapchain", rel_size);
3143 #endif
3144         }
3145 #else
3146 #warning read_data_files_in_search_paths unsupported platform
3147 #endif
3148     }
3149 
3150     // Allocate the required space
3151     search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3152     if (NULL == search_path) {
3153         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
3154                    "read_data_files_in_search_paths: Failed to allocate space for search path of length %d",
3155                    (uint32_t)search_path_size);
3156         vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3157         goto out;
3158     }
3159 
3160     cur_path_ptr = search_path;
3161 
3162     // Add the remaining paths to the list
3163     if (NULL != override_path) {
3164         size_t override_path_len = strlen(override_path);
3165         loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len);
3166         cur_path_ptr += override_path_len;
3167     } else {
3168         // Add any additional search paths defined in the additive environment variable
3169         if (NULL != additional_env) {
3170             copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr);
3171         }
3172 
3173 #if defined(_WIN32)
3174         if (NULL != package_path) {
3175             copy_data_file_info(package_path, NULL, 0, &cur_path_ptr);
3176         }
3177 #elif COMMON_UNIX_PLATFORMS
3178         if (rel_size > 0) {
3179 #if defined(__APPLE__)
3180             // Add the bundle's Resources dir to the beginning of the search path.
3181             // Looks for manifests in the bundle first, before any system directories.
3182             // This also appears to work unmodified for iOS; it finds the app bundle on the device's
3183             // file system. (RSW)
3184             CFBundleRef main_bundle = CFBundleGetMainBundle();
3185             if (NULL != main_bundle) {
3186                 CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
3187                 if (NULL != ref) {
3188                     if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
3189                         cur_path_ptr += strlen(cur_path_ptr);
3190                         *cur_path_ptr++ = DIRECTORY_SYMBOL;
3191                         memcpy(cur_path_ptr, relative_location, rel_size);
3192                         cur_path_ptr += rel_size;
3193                         *cur_path_ptr++ = PATH_SEPARATOR;
3194                         if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3195                             use_first_found_manifest = true;
3196                         }
3197                     }
3198                     CFRelease(ref);
3199                 }
3200             }
3201 #endif  // __APPLE__
3202 
3203             // Only add the home folders if not NULL
3204             if (NULL != home_config_dir) {
3205                 copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr);
3206             }
3207             copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr);
3208             copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3209 #if defined(EXTRASYSCONFDIR)
3210             copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3211 #endif
3212 
3213             // Only add the home folders if not NULL
3214             if (NULL != home_data_dir) {
3215                 copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr);
3216             }
3217             copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr);
3218 #if defined(__OHOS__)
3219             if (currentProcessEnableDebugLayer) {
3220                 copy_data_file_info(debug_layer_json_path, relative_location, rel_size, &cur_path_ptr);
3221             }
3222             copy_data_file_info("/system/etc/vulkan/swapchain/", relative_location, rel_size, &cur_path_ptr);
3223 #endif
3224         }
3225 
3226         // Remove the last path separator
3227         --cur_path_ptr;
3228 
3229         assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
3230         *cur_path_ptr = '\0';
3231 #else
3232 #warning read_data_files_in_search_paths unsupported platform
3233 #endif
3234     }
3235 
3236     // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
3237     // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
3238     char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
3239     size_t search_path_updated_size = strlen(search_path);
3240     for (size_t first = 0; first < search_path_updated_size;) {
3241         // If this is an empty path, erase it
3242         if (search_path[first] == PATH_SEPARATOR) {
3243             memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
3244             search_path_updated_size -= 1;
3245             continue;
3246         }
3247 
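        // Find the end of the current path entry, then compare it against every later entry and erase exact duplicates.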
3248         size_t first_end = first + 1;
3249         first_end += strcspn(&search_path[first_end], path_sep_str);
3250         for (size_t second = first_end + 1; second < search_path_updated_size;) {
3251             size_t second_end = second + 1;
3252             second_end += strcspn(&search_path[second_end], path_sep_str);
3253             if (first_end - first == second_end - second &&
3254                 !strncmp(&search_path[first], &search_path[second], second_end - second)) {
3255                 // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
3256                 if (search_path[second_end] == PATH_SEPARATOR) {
3257                     second_end++;
3258                 }
3259                 memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
3260                 search_path_updated_size -= second_end - second;
3261             } else {
3262                 second = second_end + 1;
3263             }
3264         }
3265         first = first_end + 1;
3266     }
3267     search_path_size = search_path_updated_size;
3268 
3269     // Print out the paths being searched if debugging is enabled
3270     uint32_t log_flags = 0;
3271     if (search_path_size > 0) {
3272         char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3273         if (NULL != tmp_search_path) {
3274             loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size);
3275             tmp_search_path[search_path_size] = '\0';
3276             if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3277                 log_flags = VULKAN_LOADER_DRIVER_BIT;
3278                 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files");
3279             } else {
3280                 log_flags = VULKAN_LOADER_LAYER_BIT;
3281                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files",
3282                            manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit");
3283             }
3284             loader_log(inst, log_flags, 0, "   In following locations:");
3285             char *cur_file;
3286             char *next_file = tmp_search_path;
3287             while (NULL != next_file && *next_file != '\0') {
3288                 cur_file = next_file;
3289                 next_file = loader_get_next_path(cur_file);
3290                 loader_log(inst, log_flags, 0, "      %s", cur_file);
3291             }
3292             loader_instance_heap_free(inst, tmp_search_path);
3293         }
3294     }
3295 
3296     // Now, parse the paths and add any manifest files found in them.
3297     vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest);
3298 
3299     if (log_flags != 0 && out_files->count > 0) {
3300         loader_log(inst, log_flags, 0, "   Found the following files:");
3301         for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) {
3302             loader_log(inst, log_flags, 0, "      %s", out_files->list[cur_file]);
3303         }
3304     } else {
3305         loader_log(inst, log_flags, 0, "   Found no files");
3306     }
3307 
3308     if (NULL != override_path) {
3309         *override_active = true;
3310     } else {
3311         *override_active = false;
3312     }
3313 
3314 out:
3315 
3316     loader_free_getenv(additional_env, inst);
3317     loader_free_getenv(override_env, inst);
3318 #if defined(_WIN32)
3319     loader_instance_heap_free(inst, package_path);
3320 #elif COMMON_UNIX_PLATFORMS
3321     loader_free_getenv(xdg_config_home, inst);
3322     loader_free_getenv(xdg_config_dirs, inst);
3323     loader_free_getenv(xdg_data_home, inst);
3324     loader_free_getenv(xdg_data_dirs, inst);
3326     loader_free_getenv(home, inst);
3327     loader_instance_heap_free(inst, default_data_home);
3328     loader_instance_heap_free(inst, default_config_home);
3329 #elif defined(__OHOS__)
3330     if (currentProcessEnableDebugLayer) {
3331         loader_free_getenv(debug_layer_json_path, inst);
3332     }
3333     loader_free_getenv(debug_layer_name, inst);
3334     loader_free_getenv(debug_hap_name, inst);
3335 #else
3336 #warning read_data_files_in_search_paths unsupported platform
3337 #endif
3338 
3339     loader_instance_heap_free(inst, search_path);
3340 
3341     return vk_result;
3342 }
3343 
3344 // Find the Vulkan library manifest files.
3345 //
3346 // This function scans the appropriate locations for a list of JSON manifest files based on the
3347 // "manifest_type".  The location is interpreted as a registry path on Windows and as one or more
3348 // directory paths on Linux.
3349 // "home_location" is an additional directory in the user's home directory to look at. It is
3350 // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
3351 // depending on environment variables. This "home_location" is only used on Linux.
3352 //
3353 // \returns
3354 // VkResult
3355 // A string list of manifest files to be opened in out_files param.
3356 // List has a pointer to string for each manifest filename.
3357 // When done using the list in out_files, pointers should be freed.
3358 // Location or override string lists can be either files or directories as
3359 // follows:
3360 //            | location | override
3361 // --------------------------------
3362 // Win ICD    | files    | files
3363 // Win Layer  | files    | dirs
3364 // Linux ICD  | dirs     | files
3365 // Linux Layer| dirs     | dirs
3366 
3367 VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3368                                const char *path_override, struct loader_string_list *out_files) {
3369     VkResult res = VK_SUCCESS;
3370     bool override_active = false;
3371 
3372     // Free and init the out_files information so there's no false data left from uninitialized variables.
3373     free_string_list(inst, out_files);
3374 
3375     res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files);
3376     if (VK_SUCCESS != res) {
3377         goto out;
3378     }
3379 
3380 #if defined(_WIN32)
3381     // Read the registry if the override wasn't active.
3382     if (!override_active) {
3383         bool warn_if_not_present = false;
3384         char *registry_location = NULL;
3385 
3386         switch (manifest_type) {
3387             default:
3388                 goto out;
3389             case LOADER_DATA_FILE_MANIFEST_DRIVER:
3390                 warn_if_not_present = true;
3391                 registry_location = VK_DRIVERS_INFO_REGISTRY_LOC;
3392                 break;
3393             case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3394                 registry_location = VK_ILAYERS_INFO_REGISTRY_LOC;
3395                 break;
3396             case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3397                 warn_if_not_present = true;
3398                 registry_location = VK_ELAYERS_INFO_REGISTRY_LOC;
3399                 break;
3400         }
3401         VkResult tmp_res =
3402             windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files);
3403         // Only return an error if there was an error this time, and no manifest files from before.
3404         if (VK_SUCCESS != tmp_res && out_files->count == 0) {
3405             res = tmp_res;
3406             goto out;
3407         }
3408     }
3409 #endif
3410 
3411 out:
3412 
3413     if (VK_SUCCESS != res) {
3414         free_string_list(inst, out_files);
3415     }
3416 
3417     return res;
3418 }
3419 
3420 struct ICDManifestInfo {
3421     char *full_library_path;
3422     uint32_t version;
3423 };
3424 
3425 // Takes a json file, opens, reads, and parses an ICD Manifest out of it.
3426 // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY
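// For reference, an ICD manifest parsed by this function has roughly the following shape
// (field values here are illustrative only):
//   {
//       "file_format_version": "1.0.1",
//       "ICD": {
//           "library_path": "path/to/driver_library",
//           "api_version": "1.3.0",
//           "is_portability_driver": false,
//           "library_arch": "64"
//       }
//   }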
3427 VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd,
3428                                    bool *skipped_portability_drivers) {
3429     VkResult res = VK_SUCCESS;
3430     cJSON *json = NULL;
3431     char *file_vers_str = NULL;
3432     char *library_arch_str = NULL;
3433     char *version_str = NULL;
3434 
3435     if (file_str == NULL) {
3436         goto out;
3437     }
3438 
3439     res = loader_get_json(inst, file_str, &json);
3440     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3441         goto out;
3442     }
3443     if (res != VK_SUCCESS || NULL == json) {
3444         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3445         goto out;
3446     }
3447 
3448     cJSON *item = loader_cJSON_GetObjectItem(json, "file_format_version");
3449     if (item == NULL) {
3450         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3451                    "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.",
3452                    file_str);
3453         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3454         goto out;
3455     }
3456 
3457     file_vers_str = loader_cJSON_Print(item);
3458     if (NULL == file_vers_str) {
3459         // The only reason the print can fail is if there was an allocation issue
3460         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3461                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON",
3462                    file_str);
3463         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3464         goto out;
3465     }
3466     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str);
3467 
3468     // Get the version of the driver manifest
3469     loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str));
3470 
3471     // The loader only knows versions 1.0.0 and 1.0.1; anything above that is unknown
3472     if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) {
3473         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3474                    "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str,
3475                    json_file_version.major, json_file_version.minor, json_file_version.patch);
3476     }
3477 
3478     cJSON *itemICD = loader_cJSON_GetObjectItem(json, "ICD");
3479     if (itemICD == NULL) {
3480         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3481                    "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str);
3482         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3483         goto out;
3484     }
3485 
3486     item = loader_cJSON_GetObjectItem(itemICD, "library_path");
3487     if (item == NULL) {
3488         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3489                    "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.",
3490                    file_str);
3491         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3492         goto out;
3493     }
3494     char *library_path = loader_cJSON_Print(item);
3495     if (!library_path) {
3496         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3497                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str);
3498         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3499         goto out;
3500     }
3501 
3502     if (strlen(library_path) == 0) {
3503         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3504                    "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str);
3505         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3506         goto out;
3507     }
3508 
3509     // Print out the library being searched for if debugging is enabled
3510     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path);
3511     // This function takes ownership of library_path - so we don't need to clean it up
3512     res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path);
3513     if (VK_SUCCESS != res) {
3514         goto out;
3515     }
3516 
3517     item = loader_cJSON_GetObjectItem(itemICD, "api_version");
3518     if (item == NULL) {
3519         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3520                    "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str);
3521         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3522         goto out;
3523     }
3524     version_str = loader_cJSON_Print(item);
3525     if (NULL == version_str) {
3526         // The only reason the print can fail is if there was an allocation issue
3527         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3528                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str);
3529 
3530         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3531         goto out;
3532     }
3533     icd->version = loader_parse_version_string(version_str);
3534 
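    // A non-zero variant indicates an API variant other than Vulkan (such as Vulkan SC), which this loader does not support.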
3535     if (VK_API_VERSION_VARIANT(icd->version) != 0) {
3536         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3537                    "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. "
3538                    "Skipping ICD JSON.",
3539                    file_str, VK_API_VERSION_VARIANT(icd->version));
3540         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3541         goto out;
3542     }
3543 
3544     // Skip over ICDs which contain a true "is_portability_driver" value whenever the application doesn't enable
3545     // portability enumeration.
3546     item = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver");
3547     if (item != NULL && item->type == cJSON_True && inst && !inst->portability_enumeration_enabled) {
3548         if (skipped_portability_drivers) {
3549             *skipped_portability_drivers = true;
3550         }
3551         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3552         goto out;
3553     }
3554 
3555     item = loader_cJSON_GetObjectItem(itemICD, "library_arch");
3556     if (item != NULL) {
3557         library_arch_str = loader_cJSON_Print(item);
3558         if (NULL != library_arch_str) {
3559             // cJSON includes the quotes by default, so we need to look for those here
3560             if ((strncmp(library_arch_str, "\"32\"", 4) == 0 && sizeof(void *) != 4) ||
3561                 (strncmp(library_arch_str, "\"64\"", 4) == 0 && sizeof(void *) != 8)) {
3562                 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
3563                            "loader_parse_icd_manifest: Driver library architecture doesn't match the current running "
3564                            "architecture, skipping this driver");
3565                 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3566                 goto out;
3567             }
3568         } else {
3569             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3570             goto out;
3571         }
3572     }
3573 out:
3574     loader_cJSON_Delete(json);
3575     loader_instance_heap_free(inst, file_vers_str);
3576     loader_instance_heap_free(inst, version_str);
3577     loader_instance_heap_free(inst, library_arch_str);
3578     return res;
3579 }
3580 
3581 // Try to find the Vulkan ICD driver(s).
3582 //
3583 // This function scans the default system loader path(s) or path specified by either the
3584 // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable
3585 // Vulkan ICD manifest files.
3586 // From these manifest files it finds the ICD libraries.
3587 //
3588 // skipped_portability_drivers is used to report whether the loader found drivers which report
3589 // portability but the application didn't enable the bit to enumerate them
3590 // Can be NULL
3591 //
3592 // \returns
3593 // Vulkan result
3594 // (on result == VK_SUCCESS) a list of icds that were discovered
3595 VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
3596                          const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) {
3597     VkResult res = VK_SUCCESS;
3598     struct loader_string_list manifest_files = {0};
3599     struct loader_envvar_filter select_filter = {0};
3600     struct loader_envvar_filter disable_filter = {0};
3601     struct ICDManifestInfo *icd_details = NULL;
3602 
3603     // Set up the ICD Trampoline list so elements can be written into it.
3604     res = loader_scanned_icd_init(inst, icd_tramp_list);
3605     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3606         return res;
3607     }
3608 
3609     bool direct_driver_loading_exclusive_mode = false;
3610     res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode);
3611     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3612         goto out;
3613     }
3614     if (direct_driver_loading_exclusive_mode) {
3615         // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers
3616         // were successfully found through the direct driver loading mechanism
3617         goto out;
3618     }
3619 
3620     // Parse the filter environment variables to determine if we have any special behavior
3621     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter);
3622     if (VK_SUCCESS != res) {
3623         goto out;
3624     }
3625     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter);
3626     if (VK_SUCCESS != res) {
3627         goto out;
3628     }
3629 
3630     // Get a list of manifest files for ICDs
3631     res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
3632     if (VK_SUCCESS != res) {
3633         goto out;
3634     }
3635 
3636     icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count);
3637     if (NULL == icd_details) {
3638         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3639         goto out;
3640     }
3641     memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count);
3642 
3643     for (uint32_t i = 0; i < manifest_files.count; i++) {
3644         VkResult icd_res = VK_SUCCESS;
3645 
3646         icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers);
3647         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3648             res = icd_res;
3649             goto out;
3650         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3651             continue;
3652         }
3653 
3654         if (select_filter.count > 0 || disable_filter.count > 0) {
3655             // Get only the filename for comparing to the filters
3656             char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL);
3657 
3658             // No directory symbol, just the filename
3659             if (NULL == just_filename_str) {
3660                 just_filename_str = manifest_files.list[i];
3661             } else {
3662                 just_filename_str++;
3663             }
3664 
3665             bool name_matches_select =
3666                 (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter));
3667             bool name_matches_disable =
3668                 (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter));
3669 
3670             if (name_matches_disable && !name_matches_select) {
3671                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3672                            "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str,
3673                            VK_DRIVERS_DISABLE_ENV_VAR);
3674                 continue;
3675             }
3676             if (select_filter.count != 0 && !name_matches_select) {
3677                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3678                            "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str,
3679                            VK_DRIVERS_SELECT_ENV_VAR);
3680                 continue;
3681             }
3682         }
3683 
3684         enum loader_layer_library_status lib_status;
3685         icd_res =
3686             loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status);
3687         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3688             res = icd_res;
3689             goto out;
3690         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3691             switch (lib_status) {
3692                 case LOADER_LAYER_LIB_NOT_LOADED:
3693                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
3694                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3695                                "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
3696                                icd_details[i].full_library_path);
3697                     break;
3698                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
3699                     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON",
3700                                icd_details[i].full_library_path);
3701                     break;
3702                 }
3703                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
3704                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
3705                     // Shouldn't be able to reach this, but if we do, it's best to report it
3706                     loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3707                                "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
3708                                "happened afterwards.",
3709                                icd_details[i].full_library_path);
3710                     break;
3711             }
3712         }
3713     }
3714 
3715 out:
3716     if (NULL != icd_details) {
3717         // Successfully got the icd_details structure, which means we need to free the paths contained within
3718         for (uint32_t i = 0; i < manifest_files.count; i++) {
3719             loader_instance_heap_free(inst, icd_details[i].full_library_path);
3720         }
3721     }
3722     free_string_list(inst, &manifest_files);
3723     return res;
3724 }
3725 
3726 // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects
3727 // into instance_layers
3728 // Manifest type must be either implicit or explicit
3729 VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type,
3730                                       const char *path_override, struct loader_layer_list *instance_layers) {
3731     assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER);
3732     VkResult res = VK_SUCCESS;
3733     struct loader_string_list manifest_files = {0};
3734 
3735     res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files);
3736     if (VK_SUCCESS != res) {
3737         goto out;
3738     }
3739 
3740     for (uint32_t i = 0; i < manifest_files.count; i++) {
3741         char *file_str = manifest_files.list[i];
3742         if (file_str == NULL) {
3743             continue;
3744         }
3745 
3746         // Parse file into JSON struct
3747         cJSON *json = NULL;
3748         VkResult local_res = loader_get_json(inst, file_str, &json);
3749         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3750             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3751             goto out;
3752         } else if (VK_SUCCESS != local_res || NULL == json) {
3753             continue;
3754         }
3755 
3756         local_res = loader_add_layer_properties(inst, instance_layers, json,
3757                                                 manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str);
3758         loader_cJSON_Delete(json);
3759 
3760         // If the error is anything other than out of memory we still want to try to load the other layers
3761         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3762             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3763             goto out;
3764         }
3765     }
3766 out:
3767     free_string_list(inst, &manifest_files);
3768 
3769     return res;
3770 }
3771 
3772 // Given a loader_layer_properties struct that is a valid override layer, concatenate the property's override paths and put them
3773 // into the output parameter override_paths
3774 VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop,
3775                                            char **override_paths) {
3776     if (prop->override_paths.count > 0) {
3777         char *cur_write_ptr = NULL;
3778         size_t override_path_size = 0;
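        // First pass: compute the total buffer size needed to hold all override paths and their separators.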
3779         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3780             override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0);
3781         }
3782         *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3783         if (*override_paths == NULL) {
3784             return VK_ERROR_OUT_OF_HOST_MEMORY;
3785         }
3786         cur_write_ptr = &(*override_paths)[0];
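        // Second pass: concatenate each override path into the buffer, separated by PATH_SEPARATOR.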
3787         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3788             copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr);
3789         }
3790         // Remove the last path separator
3791         --cur_write_ptr;
3792         assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size);
3793         *cur_write_ptr = '\0';
3794         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s",
3795                    *override_paths);
3796     }
3797     return VK_SUCCESS;
3798 }
3799 
3800 VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3801                                 const struct loader_envvar_all_filters *filters) {
3802     VkResult res = VK_SUCCESS;
3803     struct loader_layer_list settings_layers = {0};
3804     struct loader_layer_list regular_instance_layers = {0};
3805     bool override_layer_valid = false;
3806     char *override_paths = NULL;
3807 
3808     bool should_search_for_other_layers = true;
3809     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3810     if (VK_SUCCESS != res) {
3811         goto out;
3812     }
3813 
3814     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3815     // output
3816     if (!should_search_for_other_layers) {
3817         *instance_layers = settings_layers;
3818         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3819         goto out;
3820     }
3821 
3822     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
3823     if (VK_SUCCESS != res) {
3824         goto out;
3825     }
3826 
3827     // Remove any extraneous override layers.
3828     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
3829 
3830     // Check to see if the override layer is present, and use its override paths.
3831     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3832         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
3833         if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) {
3834             res = get_override_layer_override_paths(inst, prop, &override_paths);
3835             if (VK_SUCCESS != res) {
3836                 goto out;
3837             }
3838             break;
3839         }
3840     }
3841 
3842     // Get a list of manifest files for explicit layers
3843     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
3844     if (VK_SUCCESS != res) {
3845         goto out;
3846     }
3847 
3848     // Verify any meta-layers in the list are valid and all the component layers are
3849     // actually present in the available layer list
3850     res = verify_all_meta_layers(inst, filters, &regular_instance_layers, &override_layer_valid);
3851     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3852         goto out;
3853     }
3854 
3855     if (override_layer_valid) {
3856         loader_remove_layers_in_blacklist(inst, &regular_instance_layers);
3857         if (NULL != inst) {
3858             inst->override_layer_present = true;
3859         }
3860     }
3861 
3862     // Remove disabled layers
3863     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3864         if (!loader_layer_is_available(inst, filters, &regular_instance_layers.list[i])) {
3865             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
3866             i--;
3867         }
3868     }
3869 
3870     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
3871 
3872 out:
3873     loader_delete_layer_list_and_properties(inst, &settings_layers);
3874     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
3875 
3876     loader_instance_heap_free(inst, override_paths);
3877     return res;
3878 }
3879 
3880 VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3881                                          const struct loader_envvar_all_filters *layer_filters) {
3882     VkResult res = VK_SUCCESS;
3883     struct loader_layer_list settings_layers = {0};
3884     struct loader_layer_list regular_instance_layers = {0};
3885     bool override_layer_valid = false;
3886     char *override_paths = NULL;
3887     bool implicit_metalayer_present = false;
3888 
3889     bool should_search_for_other_layers = true;
3890     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3891     if (VK_SUCCESS != res) {
3892         goto out;
3893     }
3894 
3895     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3896     // output
3897     if (!should_search_for_other_layers) {
3898         *instance_layers = settings_layers;
3899         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3900         goto out;
3901     }
3902 
3903     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
3904     if (VK_SUCCESS != res) {
3905         goto out;
3906     }
3907 
3908     // Remove any extraneous override layers.
3909     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
3910 
3911     // Check to see if either the override layer is present, or another implicit meta-layer.
3912     // Each of these may require explicit layers to be enabled at this time.
3913     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3914         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
3915         if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) {
3916             override_layer_valid = true;
3917             res = get_override_layer_override_paths(inst, prop, &override_paths);
3918             if (VK_SUCCESS != res) {
3919                 goto out;
3920             }
3921         } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
3922             implicit_metalayer_present = true;
3923         }
3924     }
3925 
3926     // If either the override layer or an implicit meta-layer is present, we need to add
3927     // explicit layer info as well.  Not to worry, though, all explicit layers not referenced by an
3928     // enabled implicit meta-layer will be removed below in loader_remove_layers_not_in_implicit_meta_layers().
3929     if (override_layer_valid || implicit_metalayer_present) {
3930         res =
3931             loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
3932         if (VK_SUCCESS != res) {
3933             goto out;
3934         }
3935     }
3936 
3937     // Verify any meta-layers in the list are valid and all the component layers are
3938     // actually present in the available layer list
3939     res = verify_all_meta_layers(inst, layer_filters, &regular_instance_layers, &override_layer_valid);
3940     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3941         goto out;
3942     }
3943 
3944     if (override_layer_valid || implicit_metalayer_present) {
3945         loader_remove_layers_not_in_implicit_meta_layers(inst, &regular_instance_layers);
3946         if (override_layer_valid && inst != NULL) {
3947             inst->override_layer_present = true;
3948         }
3949     }
3950 
3951     // Remove disabled layers
3952     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3953         if (!loader_implicit_layer_is_enabled(inst, layer_filters, &regular_instance_layers.list[i])) {
3954             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
3955             i--;
3956         }
3957     }
3958 
3959     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
3960 
3961 out:
3962     loader_delete_layer_list_and_properties(inst, &settings_layers);
3963     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
3964 
3965     loader_instance_heap_free(inst, override_paths);
3966     return res;
3967 }
3968 
3969 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
3970     // inst is not wrapped
3971     if (inst == VK_NULL_HANDLE) {
3972         return NULL;
3973     }
3974 
3975     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
3976 
3977     if (disp_table == NULL) return NULL;
3978 
3979     struct loader_instance *loader_inst = loader_get_instance(inst);
3980 
3981     if (loader_inst->instance_finished_creation) {
3982         disp_table = &loader_inst->terminator_dispatch;
3983     }
3984 
3985     bool found_name;
3986     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
3987     if (found_name) {
3988         return addr;
3989     }
3990 
3991     // Check if any drivers support the function, and if so, add it to the unknown function list
3992     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
3993     if (NULL != addr) return addr;
3994 
3995     // Don't call down the chain, this would be an infinite loop
3996     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
3997     return NULL;
3998 }
3999 
4000 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
4001     // Global functions - Do not need a valid instance handle to query
4002     if (!strcmp(pName, "vkGetInstanceProcAddr")) {
4003         return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
4004     }
4005     if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
4006         return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
4007     }
4008     if (!strcmp(pName, "vkCreateInstance")) {
4009         return (PFN_vkVoidFunction)terminator_CreateInstance;
4010     }
4011 
4012     // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, the loader has allowed querying
4013     // with a NULL VkInstance handle for long enough that it is impractical to fix this bug in the loader.
4014 
4015     // As such, this behavior is kept to maintain compatibility with the RTSS layer (RivaTuner Statistics Server), but it may
4016     // also be depended upon by other layers out in the wild.
4017     if (!strcmp(pName, "vkCreateDevice")) {
4018         return (PFN_vkVoidFunction)terminator_CreateDevice;
4019     }
4020 
4021     // inst is not wrapped
4022     if (inst == VK_NULL_HANDLE) {
4023         return NULL;
4024     }
4025     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
4026 
4027     if (disp_table == NULL) return NULL;
4028 
4029     struct loader_instance *loader_inst = loader_get_instance(inst);
4030 
4031     // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from
4032     // vkGetInstanceProcAddr. This is because VK_EXT_debug_utils is an instance level extension with device level functions, and
4033     // is 'supported' by the loader.
4034     // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are layers
4035     // present which do not check for NULL before calling the function.
4036     if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
4037         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT
4038                                                                      : NULL;
4039     }
4040     if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
4041         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT
4042                                                                      : NULL;
4043     }
4044     if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
4045         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT
4046                                                                      : NULL;
4047     }
4048     if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
4049         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT
4050                                                                      : NULL;
4051     }
4052     if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
4053         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT
4054                                                                      : NULL;
4055     }
4056     if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
4057         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT
4058                                                                      : NULL;
4059     }
4060     if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
4061         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT
4062                                                                      : NULL;
4063     }
4064     if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
4065         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT
4066                                                                      : NULL;
4067     }
4068 
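    // Once the instance has finished creation, look up the function in the stored terminator dispatch table
    // instead of the dispatch table read from the instance handle.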
4069     if (loader_inst->instance_finished_creation) {
4070         disp_table = &loader_inst->terminator_dispatch;
4071     }
4072 
4073     bool found_name;
4074     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
4075     if (found_name) {
4076         return addr;
4077     }
4078 
4079     // Check if it is an unknown physical device function, to see if any drivers support it.
4080     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
4081     if (addr) {
4082         return addr;
4083     }
4084 
4085     // Assume it is an unknown device function, check to see if any drivers support it.
4086     addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
4087     if (addr) {
4088         return addr;
4089     }
4090 
4091     // Don't call down the chain, this would be an infinite loop
4092     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
4093     return NULL;
4094 }
4095 
4096 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) {
4097     struct loader_device *dev;
4098     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4099 
4100     // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
4101     // This is so we can properly intercept any device commands needing a terminator.
4102     if (!strcmp(pName, "vkGetDeviceProcAddr")) {
4103         return (PFN_vkVoidFunction)loader_gpa_device_terminator;
4104     }
4105 
4106     // NOTE: Device Funcs needing Trampoline/Terminator.
4107     // Overrides for device functions needing a trampoline and
4108     // a terminator because certain device entry-points still need to go
4109     // through a terminator before hitting the ICD.  This could be for
4110     // several reasons, but the main one is currently unwrapping an
4111     // object before passing the appropriate info along to the ICD.
4112     // This is why we also have to override the direct ICD call to
4113     // vkGetDeviceProcAddr to intercept those calls.
4114     // If the pName is for a 'known' function but isn't available, due to
4115     // the corresponding extension/feature not being enabled, we need to
4116     // return NULL and not call down to the driver's GetDeviceProcAddr.
4117     if (NULL != dev) {
4118         bool found_name = false;
4119         PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name);
4120         if (found_name) {
4121             return addr;
4122         }
4123     }
4124 
4125     if (icd_term == NULL) {
4126         return NULL;
4127     }
4128 
4129     return icd_term->dispatch.GetDeviceProcAddr(device, pName);
4130 }
4131 
4132 struct loader_instance *loader_get_instance(const VkInstance instance) {
4133     // look up the loader_instance in our list by comparing dispatch tables, as
4134     // there is no guarantee the instance is still a loader_instance* after any
4135     // layers which wrap the instance object.
4136     const VkLayerInstanceDispatchTable *disp;
4137     struct loader_instance *ptr_instance = (struct loader_instance *)instance;
4138     if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) {
4139         return NULL;
4140     } else {
4141         disp = loader_get_instance_layer_dispatch(instance);
4142         loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
4143         for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
4144             if (&inst->disp->layer_inst_disp == disp) {
4145                 ptr_instance = inst;
4146                 break;
4147             }
4148         }
4149         loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
4150     }
4151     return ptr_instance;
4152 }
4153 
4154 loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
4155     char* libPath = prop->lib_name;
4156 #if defined(__OHOS__)
4157     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
4158     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
4159     bool isDebugLayer = false;
4160     char* debugLayerLibPath = NULL;
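    // If a debug layer is requested via the debug.graphic.debug_layer parameter, build the expected library name
    // (lib<debug_layer_name>.so); when it matches this layer's lib_name, prepend the path returned by
    // GetDebugLayerLibPath() so the debug version of the layer library is opened instead of the default one.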
4161     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
4162         const char lib_prefix[] = "lib";
4163         const char so_suffix[] = ".so";
4164         size_t totalLen = strlen(debug_layer_name) + strlen(lib_prefix) + strlen(so_suffix) + 1;
4165         char* layerSoName = loader_instance_heap_calloc(inst, totalLen, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4166         if (layerSoName == NULL) {
4167             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc layerSoName fail");
4168             goto mallocErr;
4169         }
4170         strncpy(layerSoName, lib_prefix, totalLen);
4171         strncat(layerSoName, debug_layer_name, totalLen);
4172         strncat(layerSoName, so_suffix, totalLen);
4173         if (strcmp(layerSoName, libPath) == 0) {
4174             isDebugLayer = true;
4175             debugLayerLibPath = GetDebugLayerLibPath(inst, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4176             if (debugLayerLibPath == NULL) {
4177                 loader_instance_heap_free(inst, layerSoName);
4178                 isDebugLayer = false;
4179                 goto mallocErr;
4180             }
4181             size_t totalLength = strlen(libPath) + strlen(debugLayerLibPath) + 1;
4182             libPath = loader_instance_heap_calloc(inst, totalLength, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4183             if (libPath == NULL) {
4184                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc libPath fail");
4185                 loader_instance_heap_free(inst, layerSoName);
4186                 loader_instance_heap_free(inst, debugLayerLibPath);
4187                 libPath = prop->lib_name;
4188                 isDebugLayer = false;
4189                 goto mallocErr;
4190             }
4191             strncpy(libPath, debugLayerLibPath, totalLength);
4192             strncat(libPath, prop->lib_name, totalLength);
4193         } else {
4194             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "layerSoName != libPath : %s != %s",
4195                 layerSoName, libPath);
4196         }
4197         loader_instance_heap_free(inst, layerSoName);
4198     }
4199 mallocErr:
4200     loader_free_getenv(debug_layer_name, inst);
4201     loader_free_getenv(debug_hap_name, inst);
4202 #endif
4203     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "try to open libPath: %s", libPath);
4204     if ((prop->lib_handle = loader_platform_open_library(libPath)) == NULL) {
4205         loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status);
4206     } else {
4207         prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
4208         loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name);
4209     }
4210 #if defined(__OHOS__)
4211     if (isDebugLayer) {
4212         loader_instance_heap_free(inst, debugLayerLibPath);
4213         loader_instance_heap_free(inst, libPath);
4214     }
4215 #endif
4216     return prop->lib_handle;
4217 }
4218 
4219 // Go through the source_list and find any implicit layers (layers without the explicit type flag). If an
4220 // implicit layer is found, add it (subject to the enable/disable filters) to target_list and expanded_target_list.
4221 VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
4222                                     struct loader_pointer_layer_list *target_list,
4223                                     struct loader_pointer_layer_list *expanded_target_list,
4224                                     const struct loader_layer_list *source_list) {
4225     for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
4226         struct loader_layer_properties *prop = &source_list->list[src_layer];
4227         if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
4228             VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list);
4229             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
4230         }
4231     }
4232     return VK_SUCCESS;
4233 }
4234 
4235 void warn_if_layers_are_older_than_application(struct loader_instance *inst) {
4236     for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
4237         // Verify that the layer api version is at least that of the application's request; if not, log a warning since
4238         // undefined behavior could occur.
4239         struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i];
4240         loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion);
4241         if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) {
4242             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4243                        "Layer %s uses API version %u.%u which is older than the application specified "
4244                        "API version of %u.%u. May cause issues.",
4245                        prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major,
4246                        inst->app_api_version.minor);
4247         }
4248     }
4249 }
4250 
4251 VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
4252                                        const struct loader_layer_list *instance_layers,
4253                                        const struct loader_envvar_all_filters *layer_filters) {
4254     VkResult res = VK_SUCCESS;
4255 
4256     assert(inst && "Cannot have null instance");
4257 
4258     if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) {
4259         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4260                    "loader_enable_instance_layers: Failed to initialize application version of the layer list");
4261         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4262         goto out;
4263     }
4264 
4265     if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) {
4266         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4267                    "loader_enable_instance_layers: Failed to initialize expanded version of the layer list");
4268         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4269         goto out;
4270     }
4271 
4272     if (inst->settings.settings_active) {
4273         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
4274                                                   pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list,
4275                                                   &inst->app_activated_layer_list, &inst->expanded_activated_layer_list);
4276         warn_if_layers_are_older_than_application(inst);
4277 
4278         goto out;
4279     }
4280 
4281     // Add any implicit layers first
4282     res = loader_add_implicit_layers(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4283                                      instance_layers);
4284     if (res != VK_SUCCESS) {
4285         goto out;
4286     }
4287 
4288     // Add any layers specified via environment variable next
4289     res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &inst->app_activated_layer_list,
4290                                         &inst->expanded_activated_layer_list, instance_layers);
4291     if (res != VK_SUCCESS) {
4292         goto out;
4293     }
4294 
4295     // Add layers specified by the application
4296     res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4297                                          pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);
4298 
4299     warn_if_layers_are_older_than_application(inst);
4300 out:
4301     return res;
4302 }
4303 
4304 // Determine the layer interface version to use.
4305 bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
4306                                         VkNegotiateLayerInterface *interface_struct) {
4307     memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
4308     interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
4309     interface_struct->loaderLayerInterfaceVersion = 1;
4310     interface_struct->pNext = NULL;
4311 
4312     if (fp_negotiate_layer_version != NULL) {
4313         // Layer supports the negotiation API, so call it with the loader's
4314         // latest version supported
4315         interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
4316         VkResult result = fp_negotiate_layer_version(interface_struct);
4317 
4318         if (result != VK_SUCCESS) {
            // The layer doesn't support the loader's latest interface version, so
            // fail loading the layer
4321             return false;
4322         }
4323     }
4324 
4325     if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
        // The layer's interface version is below the loader's minimum supported version, so
        // fail loading the layer
4328         return false;
4329     }
4330 
4331     return true;
4332 }
4333 
4334 // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or
4335 // not to return that trampoline when vkGetDeviceProcAddr is called
4336 void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev,
4337                                                    const struct loader_extension_list *icd_exts,
4338                                                    const VkDeviceCreateInfo *pCreateInfo) {
    // Can only set up debug marker here, since debug utils is an instance extension.
4340     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) {
4341         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
            // Check if it is supported by the driver
4343             for (uint32_t j = 0; j < icd_exts->count; ++j) {
4344                 if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4345                     dev->layer_extensions.ext_debug_marker_enabled = true;
4346                 }
4347             }
            // Also check if any layers support it.
4349             for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) {
4350                 struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j];
4351                 for (uint32_t k = 0; k < layer->device_extension_list.count; k++) {
4352                     if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4353                         dev->layer_extensions.ext_debug_marker_enabled = true;
4354                     }
4355                 }
4356             }
4357         }
4358     }
4359 }
4360 
4361 VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
4362                                                           const VkDeviceCreateInfo *pCreateInfo,
4363                                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
4364                                                           PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
4365     VkResult res;
4366     VkPhysicalDevice internal_device = VK_NULL_HANDLE;
4367     struct loader_device *dev = NULL;
4368     struct loader_instance *inst = NULL;
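    // When the layer provides a VkInstance, the physical device handle it passed is used directly; otherwise the handle is a
    // loader trampoline object, so it is unwrapped below to recover the underlying physical device and the owning instance.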
4369 
4370     if (instance != VK_NULL_HANDLE) {
4371         inst = loader_get_instance(instance);
4372         internal_device = physicalDevice;
4373     } else {
4374         struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
4375         internal_device = phys_dev->phys_dev;
4376         inst = (struct loader_instance *)phys_dev->this_instance;
4377     }
4378 
4379     // Get the physical device (ICD) extensions
4380     struct loader_extension_list icd_exts = {0};
4381     icd_exts.list = NULL;
4382     res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
4383     if (VK_SUCCESS != res) {
4384         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
4385         goto out;
4386     }
4387 
4388     PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
4389     if (layerGIPA != NULL) {
4390         enumDeviceExtensionProperties =
4391             (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
4392     } else {
4393         enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
4394     }
4395     res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
4396     if (res != VK_SUCCESS) {
4397         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
4398         goto out;
4399     }
4400 
4401     // Make sure requested extensions to be enabled are supported
4402     res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
4403     if (res != VK_SUCCESS) {
4404         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
4405         goto out;
4406     }
4407 
4408     dev = loader_create_logical_device(inst, pAllocator);
4409     if (dev == NULL) {
4410         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4411         goto out;
4412     }
4413 
4414     setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo);
4415 
4416     res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
4417     if (res != VK_SUCCESS) {
4418         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice:  Failed to create device chain.");
4419         goto out;
4420     }
4421 
4422     *pDevice = dev->chain_device;
4423 
    // Initialize any device extension dispatch entries from the instance list
4425     loader_init_dispatch_dev_ext(inst, dev);
4426 
    // Initialize WSI device extensions as part of core dispatch since the loader
    // has dedicated trampoline code for these
4429     loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4430                                                 dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);
4431 
4432 out:
4433 
4434     // Failure cleanup
4435     if (VK_SUCCESS != res) {
4436         if (NULL != dev) {
            // Find the icd_term this device belongs to, then remove the device from that icd_term.
            // We need to iterate the linked lists and unlink the device from them. Don't destroy
            // the device inside the loop, since it may not have been added to an icd_term and there
            // are other allocations attached to it.
4441             struct loader_icd_term *icd_term = inst->icd_terms;
4442             bool found = false;
4443             while (!found && NULL != icd_term) {
4444                 struct loader_device *cur_dev = icd_term->logical_device_list;
4445                 struct loader_device *prev_dev = NULL;
4446                 while (NULL != cur_dev) {
4447                     if (cur_dev == dev) {
4448                         if (cur_dev == icd_term->logical_device_list) {
4449                             icd_term->logical_device_list = cur_dev->next;
4450                         } else if (prev_dev) {
4451                             prev_dev->next = cur_dev->next;
4452                         }
4453 
4454                         found = true;
4455                         break;
4456                     }
4457                     prev_dev = cur_dev;
4458                     cur_dev = cur_dev->next;
4459                 }
4460                 icd_term = icd_term->next;
4461             }
4462             // Now destroy the device and the allocations associated with it.
4463             loader_destroy_logical_device(dev, pAllocator);
4464         }
4465     }
4466 
4467     if (NULL != icd_exts.list) {
4468         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
4469     }
4470     return res;
4471 }
4472 
4473 VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
4474                                                        PFN_vkDestroyDevice destroyFunction) {
4475     struct loader_device *dev;
4476 
4477     if (device == VK_NULL_HANDLE) {
4478         return;
4479     }
4480 
4481     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4482 
4483     destroyFunction(device, pAllocator);
4484     if (NULL != dev) {
4485         dev->chain_device = NULL;
4486         dev->icd_device = NULL;
4487         loader_remove_logical_device(icd_term, dev, pAllocator);
4488     }
4489 }
4490 
// Given the list of layers to activate in the loader_instance structure,
// this function adds a VkLayerInstanceCreateInfo structure to the
// VkInstanceCreateInfo.pNext pointer.
// Each activated layer will have its own VkLayerInstanceLink
// structure that tells the layer what Get*ProcAddr to call to
// get function pointers to the next layer down.
// Once the chain info has been created, this function
// executes the CreateInstance call chain. Each layer then
// has an opportunity in its CreateInstance function
// to set up its dispatch table when the lower layer returns
// successfully.
// Each layer can wrap or not wrap the returned VkInstance object
// as it sees fit.
// The instance chain is terminated by a loader function
// that will call CreateInstance on all available ICDs and
// cache those VkInstance objects for future use.
4507 VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
4508                                       struct loader_instance *inst, VkInstance *created_instance) {
4509     uint32_t num_activated_layers = 0;
4510     struct activated_layer_info *activated_layers = NULL;
4511     VkLayerInstanceCreateInfo chain_info;
4512     VkLayerInstanceLink *layer_instance_link_info = NULL;
4513     VkInstanceCreateInfo loader_create_info;
4514     VkResult res;
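    // The chain below is built from the bottom up: each Get*ProcAddr pointer starts at the corresponding loader terminator and
    // is re-pointed at every activated layer in turn, so each layer receives the address of the one beneath it.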
4515 
4516     PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
4517     PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
4518     PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
4519     PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
4520     PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;
4521 
4522     memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
4523 
4524     if (inst->expanded_activated_layer_list.count > 0) {
4525         chain_info.u.pLayerInfo = NULL;
4526         chain_info.pNext = pCreateInfo->pNext;
4527         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4528         chain_info.function = VK_LAYER_LINK_INFO;
4529         loader_create_info.pNext = &chain_info;
4530 
4531         layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
4532         if (!layer_instance_link_info) {
4533             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4534                        "loader_create_instance_chain: Failed to alloc Instance objects for layer");
4535             return VK_ERROR_OUT_OF_HOST_MEMORY;
4536         }
4537 
4538         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4539         if (!activated_layers) {
4540             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4541                        "loader_create_instance_chain: Failed to alloc activated layer storage array");
4542             return VK_ERROR_OUT_OF_HOST_MEMORY;
4543         }
4544 
4545         // Create instance chain of enabled layers
4546         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4547             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4548             loader_platform_dl_handle lib_handle;
4549 
            // Skip it if a layer with the same name has already been successfully activated
4551             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4552                 continue;
4553             }
4554 
4555             lib_handle = loader_open_layer_file(inst, layer_prop);
4556             if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
4557                 return VK_ERROR_OUT_OF_HOST_MEMORY;
4558             }
4559             if (!lib_handle) {
4560                 continue;
4561             }
4562 
4563             if (NULL == layer_prop->functions.negotiate_layer_interface) {
4564                 PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
4565                 bool functions_in_interface = false;
4566                 if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) {
4567                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4568                         lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
4569                 } else {
4570                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4571                         lib_handle, layer_prop->functions.str_negotiate_interface);
4572                 }
4573 
                // If we can negotiate an interface version, that single call can also
                // provide all of the function pointers we need, so try it first and
                // see whether it gives us everything necessary.
4578                 if (NULL != negotiate_interface) {
4579                     layer_prop->functions.negotiate_layer_interface = negotiate_interface;
4580 
4581                     VkNegotiateLayerInterface interface_struct;
4582 
4583                     if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
4584                         // Go ahead and set the properties version to the
4585                         // correct value.
4586                         layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;
4587 
4588                         // If the interface is 2 or newer, we have access to the
4589                         // new GetPhysicalDeviceProcAddr function, so grab it,
4590                         // and the other necessary functions, from the
4591                         // structure.
4592                         if (interface_struct.loaderLayerInterfaceVersion > 1) {
4593                             cur_gipa = interface_struct.pfnGetInstanceProcAddr;
4594                             cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
4595                             cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
4596                             if (cur_gipa != NULL) {
4597                                 // We've set the functions, so make sure we
4598                                 // don't do the unnecessary calls later.
4599                                 functions_in_interface = true;
4600                             }
4601                         }
4602                     }
4603                 }
4604 
4605                 if (!functions_in_interface) {
4606                     if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
4607                         if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4608                             cur_gipa =
4609                                 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4610                             layer_prop->functions.get_instance_proc_addr = cur_gipa;
4611 
4612                             if (NULL == cur_gipa) {
4613                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4614                                            "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"",
4615                                            layer_prop->lib_name);
4616                                 continue;
4617                             }
4618                         } else {
4619                             cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
4620                                                                                                    layer_prop->functions.str_gipa);
4621 
4622                             if (NULL == cur_gipa) {
4623                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4624                                            "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"",
4625                                            layer_prop->functions.str_gipa, layer_prop->lib_name);
4626                                 continue;
4627                             }
4628                         }
4629                     }
4630                 }
4631             }
4632 
4633             layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4634             layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
4635             layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
4636             next_gipa = cur_gipa;
4637             if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
4638                 layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
4639                 next_gpdpa = cur_gpdpa;
4640             }
4641             if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
4642                 layer_prop->functions.get_instance_proc_addr = cur_gipa;
4643             }
4644             if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
4645                 layer_prop->functions.get_device_proc_addr = cur_gdpa;
4646             }
4647 
4648             chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];
4649 
4650             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4651             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4652             activated_layers[num_activated_layers].library = layer_prop->lib_name;
4653             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4654             if (activated_layers[num_activated_layers].is_implicit) {
4655                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4656             }
4657 
            loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted instance layer \"%s\" (%s)",
4659                        layer_prop->info.layerName, layer_prop->lib_name);
4660 
4661             num_activated_layers++;
4662         }
4663     }
4664 
4665     // Make sure each layer requested by the application was actually loaded
4666     for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
4667         struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp];
4668         bool found = false;
4669         for (uint32_t act = 0; act < num_activated_layers; ++act) {
4670             if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
4671                 found = true;
4672                 break;
4673             }
4674         }
4675         // If it wasn't found, we want to at least log an error.  However, if it was enabled by the application directly,
4676         // we want to return a bad layer error.
4677         if (!found) {
4678             bool app_requested = false;
4679             for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
4680                 if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
4681                     app_requested = true;
4682                     break;
4683                 }
4684             }
4685             VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
4686             char ending = '.';
4687             if (app_requested) {
4688                 log_flag |= VULKAN_LOADER_ERROR_BIT;
4689                 ending = '!';
4690             } else {
4691                 log_flag |= VULKAN_LOADER_INFO_BIT;
4692             }
4693             switch (exp_layer_prop->lib_status) {
4694                 case LOADER_LAYER_LIB_NOT_LOADED:
4695                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName,
4696                                ending);
4697                     break;
4698                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
4699                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName,
4700                                ending);
4701                     break;
4702                 }
4703                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
4704                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName,
4705                                ending);
4706                     break;
4707                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
4708                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
                    // Shouldn't be able to reach this, but if we do, it is best to report it
4710                     loader_log(inst, log_flag, 0,
4711                                "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
4712                                "list of activated layers%c",
4713                                exp_layer_prop->info.layerName, ending);
4714                     break;
4715             }
4716             if (app_requested) {
4717                 return VK_ERROR_LAYER_NOT_PRESENT;
4718             }
4719         }
4720     }
4721 
4722     VkLoaderFeatureFlags feature_flags = 0;
4723 #if defined(_WIN32)
4724     feature_flags = windows_initialize_dxgi();
4725 #endif
4726 
4727     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
4728     if (fpCreateInstance) {
4729         VkLayerInstanceCreateInfo instance_dispatch;
4730         instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4731         instance_dispatch.pNext = loader_create_info.pNext;
4732         instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
4733         instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;
4734 
4735         VkLayerInstanceCreateInfo device_callback;
4736         device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4737         device_callback.pNext = &instance_dispatch;
4738         device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
4739         device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
4740         device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;
4741 
4742         VkLayerInstanceCreateInfo loader_features;
4743         loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4744         loader_features.pNext = &device_callback;
4745         loader_features.function = VK_LOADER_FEATURES;
4746         loader_features.u.loaderFeatures = feature_flags;
4747 
4748         loader_create_info.pNext = &loader_features;
4749 
4750         // If layer debugging is enabled, let's print out the full callstack with layers in their
4751         // defined order.
4752         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
4753         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Application>");
4754         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4755         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Loader>");
4756         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4757         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
4758             uint32_t index = num_activated_layers - cur_layer - 1;
4759             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
4760             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
4761                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
4762             if (activated_layers[index].is_implicit) {
4763                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
4764                            activated_layers[index].disable_env);
4765             }
4766             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
4767             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
4768             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4769         }
4770         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Drivers>");
4771 
4772         res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
4773     } else {
4774         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
4775         // Couldn't find CreateInstance function!
4776         res = VK_ERROR_INITIALIZATION_FAILED;
4777     }
4778 
4779     if (res == VK_SUCCESS) {
4780         // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator()
4781         memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable));
4782 
4783         loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
4784         inst->instance = *created_instance;
4785 
4786         if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) {
4787             res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names);
4788             if (res != VK_SUCCESS) {
4789                 return res;
4790             }
4791 
4792             for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) {
4793                 res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i],
4794                                               strlen(pCreateInfo->ppEnabledLayerNames[i]));
4795                 if (res != VK_SUCCESS) return res;
4796             }
4797         }
4798     }
4799 
4800     return res;
4801 }
4802 
4803 void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) {
4804     loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4805                                                   created_inst);
4806 }
4807 
4808 #if defined(__APPLE__)
4809 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4810                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4811                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4812                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) {
4813 #else
4814 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4815                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4816                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4817                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) {
4818 #endif
4819     uint32_t num_activated_layers = 0;
4820     struct activated_layer_info *activated_layers = NULL;
4821     VkLayerDeviceLink *layer_device_link_info;
4822     VkLayerDeviceCreateInfo chain_info;
4823     VkDeviceCreateInfo loader_create_info;
4824     VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL;
4825     VkResult res;
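    // As with the instance chain, the device chain is built from the bottom up: both Get*ProcAddr pointers start at the loader
    // terminators and are re-pointed at each activated layer as it is linked in.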
4826 
4827     PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
4828     PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;
4829 
4830     memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
4831 
4832     if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4833         bool invalid_device_layer_usage = false;
4834 
4835         if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) {
4836             invalid_device_layer_usage = true;
4837         } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) {
4838             invalid_device_layer_usage = true;
4839         } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4840             invalid_device_layer_usage = true;
4841         } else if (inst->enabled_layer_names.list != NULL) {
4842             for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) {
4843                 const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i];
4844 
4845                 if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) {
4846                     invalid_device_layer_usage = true;
4847                     break;
4848                 }
4849             }
4850         }
4851 
4852         if (invalid_device_layer_usage) {
4853             loader_log(
4854                 inst, VULKAN_LOADER_WARN_BIT, 0,
4855                 "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' "
4856                 "when creating a Vulkan device.");
4857         }
4858     }
4859 
    // Before we continue, we need to find out if the KHR_device_group extension is in use. If it is, we then
    // need to look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the pNext chain of the device create info.
    // This is because we need to replace all the incoming physical device values (which are really loader trampoline physical
    // device values) with the layer/ICD version.
4864     {
4865         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4866         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4867         while (NULL != pNext) {
4868             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4869                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
4870                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4871                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
4872                     VkPhysicalDevice *phys_dev_array = NULL;
4873                     if (NULL == temp_struct) {
4874                         return VK_ERROR_OUT_OF_HOST_MEMORY;
4875                     }
4876                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
4877                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
4878                     if (NULL == phys_dev_array) {
4879                         return VK_ERROR_OUT_OF_HOST_MEMORY;
4880                     }
4881 
4882                     // Before calling down, replace the incoming physical device values (which are really loader trampoline
4883                     // physical devices) with the next layer (or possibly even the terminator) physical device values.
4884                     struct loader_physical_device_tramp *cur_tramp;
4885                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
4886                         cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
4887                         phys_dev_array[phys_dev] = cur_tramp->phys_dev;
4888                     }
4889                     temp_struct->pPhysicalDevices = phys_dev_array;
4890 
4891                     original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext;
4892 
4893                     // Replace the old struct in the pNext chain with this one.
4894                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
4895                 }
4896                 break;
4897             }
4898 
4899             pPrev = pNext;
4900             pNext = pNext->pNext;
4901         }
4902     }
4903     if (inst->expanded_activated_layer_list.count > 0) {
4904         layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count);
4905         if (!layer_device_link_info) {
4906             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4907                        "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
4908             return VK_ERROR_OUT_OF_HOST_MEMORY;
4909         }
4910 
4911         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4912         if (!activated_layers) {
4913             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4914                        "loader_create_device_chain: Failed to alloc activated layer storage array");
4915             return VK_ERROR_OUT_OF_HOST_MEMORY;
4916         }
4917 
4918         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4919         chain_info.function = VK_LAYER_LINK_INFO;
4920         chain_info.u.pLayerInfo = NULL;
4921         chain_info.pNext = loader_create_info.pNext;
4922         loader_create_info.pNext = &chain_info;
4923 
        // Create device chain of enabled layers
4925         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4926             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4927             loader_platform_dl_handle lib_handle = layer_prop->lib_handle;
4928 
            // Skip it if a layer with the same name has already been successfully activated
4930             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4931                 continue;
4932             }
4933 
4934             // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from
4935             // the list.
4936             if (!lib_handle) {
4937                 continue;
4938             }
4939 
            // The Get*ProcAddr pointers will already be filled in if they were received from either the JSON manifest file or
            // the version negotiation
4942             if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
4943                 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4944                     fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4945                     layer_prop->functions.get_instance_proc_addr = fpGIPA;
4946                 } else
4947                     fpGIPA =
4948                         (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
4949                 if (!fpGIPA) {
4950                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4951                                "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\".  "
4952                                "Skipping layer.",
4953                                layer_prop->lib_name);
4954                     continue;
4955                 }
4956             }
4957 
4958             if (fpGIPA == callingLayer) {
4959                 if (layerNextGDPA != NULL) {
4960                     *layerNextGDPA = nextGDPA;
4961                 }
4962                 // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device,
                // and we don't want to continue any further since the next layer down would be the calling layer
4964                 break;
4965             }
4966 
4967             if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
4968                 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
4969                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
4970                     layer_prop->functions.get_device_proc_addr = fpGDPA;
4971                 } else
4972                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
4973                 if (!fpGDPA) {
4974                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4975                                "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name);
4976                     continue;
4977                 }
4978             }
4979 
4980             layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4981             layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
4982             layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
4983             chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
4984             nextGIPA = fpGIPA;
4985             nextGDPA = fpGDPA;
4986 
4987             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4988             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4989             activated_layers[num_activated_layers].library = layer_prop->lib_name;
4990             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4991             if (activated_layers[num_activated_layers].is_implicit) {
4992                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4993             }
4994 
4995             loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)",
4996                        layer_prop->info.layerName, layer_prop->lib_name);
4997 
4998             num_activated_layers++;
4999         }
5000     }
5001 
5002     VkDevice created_device = (VkDevice)dev;
5003     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
5004     if (fpCreateDevice) {
5005         VkLayerDeviceCreateInfo create_info_disp;
5006 
5007         create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
5008         create_info_disp.function = VK_LOADER_DATA_CALLBACK;
5009 
5010         create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
5011 
5012         // If layer debugging is enabled, let's print out the full callstack with layers in their
5013         // defined order.
5014         uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
5015         loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
5016         loader_log(inst, layer_driver_bits, 0, "   <Application>");
5017         loader_log(inst, layer_driver_bits, 0, "     ||");
5018         loader_log(inst, layer_driver_bits, 0, "   <Loader>");
5019         loader_log(inst, layer_driver_bits, 0, "     ||");
5020         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
5021             uint32_t index = num_activated_layers - cur_layer - 1;
5022             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
5023             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
5024                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
5025             if (activated_layers[index].is_implicit) {
5026                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
5027                            activated_layers[index].disable_env);
5028             }
5029             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
5030             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
5031             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5032         }
5033         loader_log(inst, layer_driver_bits, 0, "   <Device>");
5034         create_info_disp.pNext = loader_create_info.pNext;
5035         loader_create_info.pNext = &create_info_disp;
5036         res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
5037         if (res != VK_SUCCESS) {
5038             return res;
5039         }
5040         dev->chain_device = created_device;
5041 
        // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fix up the chain to
5043         // point back at the original VkDeviceGroupDeviceCreateInfo.
5044         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
5045         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
5046         while (NULL != pNext) {
5047             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5048                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5049                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5050                     pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
5051                 }
5052                 break;
5053             }
5054 
5055             pPrev = pNext;
5056             pNext = pNext->pNext;
5057         }
5058 
5059     } else {
5060         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5061                    "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
5062         // Couldn't find CreateDevice function!
5063         return VK_ERROR_INITIALIZATION_FAILED;
5064     }
5065 
5066     // Initialize device dispatch table
5067     loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
5068     // Initialize the dispatch table to functions which need terminators
5069     // These functions point directly to the driver, not the terminator functions
5070     init_extension_device_proc_terminator_dispatch(dev);
5071 
5072     return res;
5073 }
5074 
5075 VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count,
5076                                 const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
5077     struct loader_layer_properties *prop;
5078 
5079     if (layer_count > 0 && ppEnabledLayerNames == NULL) {
5080         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5081                    "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero");
5082         return VK_ERROR_LAYER_NOT_PRESENT;
5083     }
5084 
5085     for (uint32_t i = 0; i < layer_count; i++) {
5086         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
5087         if (result != VK_STRING_ERROR_NONE) {
5088             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5089                        "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed");
5090             return VK_ERROR_LAYER_NOT_PRESENT;
5091         }
5092 
5093         prop = loader_find_layer_property(ppEnabledLayerNames[i], list);
5094         if (NULL == prop) {
5095             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5096                        "loader_validate_layers: Layer %d does not exist in the list of available layers", i);
5097             return VK_ERROR_LAYER_NOT_PRESENT;
5098         }
5099         if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON &&
5100             prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
5101             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5102                        "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file",
5103                        i);
5104             return VK_ERROR_LAYER_NOT_PRESENT;
5105         }
5106     }
5107     return VK_SUCCESS;
5108 }
5109 
5110 VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
5111                                              const struct loader_layer_list *instance_layers,
5112                                              const struct loader_envvar_all_filters *layer_filters,
5113                                              const VkInstanceCreateInfo *pCreateInfo) {
5114     VkExtensionProperties *extension_prop;
5115     char *env_value;
5116     bool check_if_known = true;
5117     VkResult res = VK_SUCCESS;
5118 
5119     struct loader_pointer_layer_list active_layers = {0};
5120     struct loader_pointer_layer_list expanded_layers = {0};
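    // These temporary layer lists are built only to determine which layer-provided instance extensions are available; both are
    // destroyed before returning.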
5121 
5122     if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) {
5123         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5124                    "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is "
5125                    "greater than zero");
5126         return VK_ERROR_EXTENSION_NOT_PRESENT;
5127     }
5128     if (!loader_init_pointer_layer_list(inst, &active_layers)) {
5129         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5130         goto out;
5131     }
5132     if (!loader_init_pointer_layer_list(inst, &expanded_layers)) {
5133         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5134         goto out;
5135     }
5136 
5137     if (inst->settings.settings_active) {
5138         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
5139                                                   pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers,
5140                                                   &expanded_layers);
5141         if (res != VK_SUCCESS) {
5142             goto out;
5143         }
5144     } else {
5145         // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their
5146         // components)
5147         res = loader_add_implicit_layers(inst, layer_filters, &active_layers, &expanded_layers, instance_layers);
5148         if (res != VK_SUCCESS) {
5149             goto out;
5150         }
5151         res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &active_layers,
5152                                             &expanded_layers, instance_layers);
5153         if (res != VK_SUCCESS) {
5154             goto out;
5155         }
5156         res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
5157                                              pCreateInfo->ppEnabledLayerNames, instance_layers);
5158         if (VK_SUCCESS != res) {
5159             goto out;
5160         }
5161     }
5162     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5163         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5164         if (result != VK_STRING_ERROR_NONE) {
5165             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5166                        "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
5167                        "string that is too long or is badly formed");
5168             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5169             goto out;
5170         }
5171 
5172         // Check if a user wants to disable the instance extension filtering behavior
5173         env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
5174         if (NULL != env_value && atoi(env_value) != 0) {
5175             check_if_known = false;
5176         }
5177         loader_free_getenv(env_value, inst);
5178 
5179         if (check_if_known) {
5180             // See if the extension is in the list of supported extensions
5181             bool found = false;
5182             for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
5183                 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
5184                     found = true;
5185                     break;
5186                 }
5187             }
5188 
5189             // If it isn't in the list, return an error
5190             if (!found) {
5191                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5192                            "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
5193                            pCreateInfo->ppEnabledExtensionNames[i]);
5194                 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5195                 goto out;
5196             }
5197         }
5198 
5199         extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
5200 
5201         if (extension_prop) {
5202             continue;
5203         }
5204 
5205         extension_prop = NULL;
5206 
5207         // Not in global list, search layer extension lists
5208         for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
5209             extension_prop =
5210                 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list);
5211             if (extension_prop) {
5212                 // Found the extension in one of the layers enabled by the app.
5213                 break;
5214             }
5215 
5216             struct loader_layer_properties *layer_prop =
5217                 loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers);
5218             if (NULL == layer_prop) {
5219                 // Should NOT get here, loader_validate_layers should have already filtered this case out.
5220                 continue;
5221             }
5222         }
5223 
5224         if (!extension_prop) {
5225             // Didn't find extension name in any of the global layers, error out
5226             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5227                        "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
5228                        "layers.",
5229                        pCreateInfo->ppEnabledExtensionNames[i]);
5230             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5231             goto out;
5232         }
5233     }
5234 
5235 out:
5236     loader_destroy_pointer_layer_list(inst, &active_layers);
5237     loader_destroy_pointer_layer_list(inst, &expanded_layers);
5238     return res;
5239 }
5240 
5241 VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
5242                                            const struct loader_pointer_layer_list *activated_device_layers,
5243                                            const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
5244     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5245         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5246         if (result != VK_STRING_ERROR_NONE) {
5247             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5248                        "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
5249                        "string that is too long or is badly formed");
5250             return VK_ERROR_EXTENSION_NOT_PRESENT;
5251         }
5252 
5253         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5254         VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts);
5255 
5256         if (extension_prop) {
5257             continue;
5258         }
5259 
5260         // Not in global list, search activated layer extension lists
5261         for (uint32_t j = 0; j < activated_device_layers->count; j++) {
5262             struct loader_layer_properties *layer_prop = activated_device_layers->list[j];
5263 
5264             extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
5265             if (extension_prop) {
5266                 // Found the extension in one of the layers enabled by the app.
5267                 break;
5268             }
5269         }
5270 
5271         if (!extension_prop) {
5272             // Didn't find extension name in any of the device layers, error out
5273             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5274                        "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
5275                        "or enabled layers.",
5276                        pCreateInfo->ppEnabledExtensionNames[i]);
5277             return VK_ERROR_EXTENSION_NOT_PRESENT;
5278         }
5279     }
5280     return VK_SUCCESS;
5281 }
5282 
5283 // Terminator functions for the Instance chain
5284 // All named terminator_<Vulkan API name>
5285 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
5286                                                          const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
5287     struct loader_icd_term *icd_term;
5288     VkExtensionProperties *prop;
5289     char **filtered_extension_names = NULL;
5290     VkInstanceCreateInfo icd_create_info;
5291     VkResult res = VK_SUCCESS;
5292     bool one_icd_successful = false;
5293 
5294     struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
5295     if (NULL == ptr_instance) {
5296         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5297                    "terminator_CreateInstance: Loader instance pointer null encountered.  Possibly set by active layer. (Policy "
5298                    "#LLP_LAYER_21)");
5299     } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) {
5300         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5301                    "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08lx. Instance value possibly "
5302                    "corrupted by active layer (Policy #LLP_LAYER_21).  ",
5303                    ptr_instance, ptr_instance->magic);
5304     }
5305 
5306     // Save the application version if it has been modified - layers sometimes need features in newer API versions than
5307     // what the application requested, and thus will increase the instance version to a level that suits their needs.
5308     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) {
5309         loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion);
5310         if (altered_version.major != ptr_instance->app_api_version.major ||
5311             altered_version.minor != ptr_instance->app_api_version.minor) {
5312             ptr_instance->app_api_version = altered_version;
5313         }
5314     }
5315 
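    // Work on a local copy of the create info so the loader can strip out layers and filter the extension list
    // per ICD without modifying the application's structure.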
5316     memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
5317 
5318     icd_create_info.enabledLayerCount = 0;
5319     icd_create_info.ppEnabledLayerNames = NULL;
5320 
5321     // NOTE: Need to filter the extensions to only those supported by the ICD.
5322     //       No ICD will advertise support for layers. An ICD library could
5323     //       support a layer, but it would be independent of the actual ICD,
5324     //       just in the same library.
5325     uint32_t extension_count = pCreateInfo->enabledExtensionCount;
5326 #if defined(LOADER_ENABLE_LINUX_SORT)
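    // Reserve one extra slot in case the loader needs to append VK_KHR_get_physical_device_properties2 below for
    // Linux GPU sorting.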
5327     extension_count += 1;
5328 #endif  // LOADER_ENABLE_LINUX_SORT
5329     filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *));
5330     if (!filtered_extension_names) {
5331         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5332                    "terminator_CreateInstance: Failed to create extension name array for %d extensions", extension_count);
5333         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5334         goto out;
5335     }
5336     icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5337 
5338     // Determine if Get Physical Device Properties 2 is available to this Instance
5339     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) {
5340         ptr_instance->supports_get_dev_prop_2 = true;
5341     } else {
5342         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5343             if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5344                 ptr_instance->supports_get_dev_prop_2 = true;
5345                 break;
5346             }
5347         }
5348     }
5349 
5350     for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
5351         icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
5352         if (NULL == icd_term) {
5353             loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5354                        "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i);
5355             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5356             goto out;
5357         }
5358 
5359         // If any error happens after here, we need to remove the ICD from the list,
5360         // because we've already added it, but haven't validated it
5361 
5362         // Make sure that we reset the pApplicationInfo so we don't get an old pointer
5363         icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
5364         icd_create_info.enabledExtensionCount = 0;
5365         struct loader_extension_list icd_exts = {0};
5366 
5367         // traverse scanned icd list adding non-duplicate extensions to the list
5368         res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5369         if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5370             // If out of memory, bail immediately.
5371             goto out;
5372         } else if (VK_SUCCESS != res) {
5373             // Something bad happened with this ICD, so free it and try the
5374             // next.
5375             ptr_instance->icd_terms = icd_term->next;
5376             icd_term->next = NULL;
5377             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5378             continue;
5379         }
5380 
5381         res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
5382                                              icd_term->scanned_icd->lib_name, &icd_exts);
5383         if (VK_SUCCESS != res) {
5384             loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5385             if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5386                 // If out of memory, bail immediately.
5387                 goto out;
5388             } else {
5389                 // Something bad happened with this ICD, so free it and try the next.
5390                 ptr_instance->icd_terms = icd_term->next;
5391                 icd_term->next = NULL;
5392                 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5393                 continue;
5394             }
5395         }
5396 
5397         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5398             prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
5399             if (prop) {
5400                 filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
5401                 icd_create_info.enabledExtensionCount++;
5402             }
5403         }
5404 #if defined(LOADER_ENABLE_LINUX_SORT)
5405         // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting.  This only needs
5406         // to be done when the API version of either the application or the driver does not natively support the core
5407         // vkGetPhysicalDeviceProperties2 entrypoint.
5408         if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) ||
5409             (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 &&
5410              VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) {
5411             prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts);
5412             if (prop) {
5413                 filtered_extension_names[icd_create_info.enabledExtensionCount] =
5414                     (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
5415                 icd_create_info.enabledExtensionCount++;
5416 
5417                 // At least one ICD supports this, so the instance should be able to support it
5418                 ptr_instance->supports_get_dev_prop_2 = true;
5419             }
5420         }
5421 #endif  // LOADER_ENABLE_LINUX_SORT
5422 
5423         // Determine if vkGetPhysicalDeviceProperties2 is available from this driver
5424         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5425             icd_term->supports_get_dev_prop_2 = true;
5426         } else {
5427             for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5428                 if (!strcmp(filtered_extension_names[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5429                     icd_term->supports_get_dev_prop_2 = true;
5430                     break;
5431                 }
5432             }
5433         }
5434 
5435         loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5436 
5437         // Get the driver version from vkEnumerateInstanceVersion
5438         uint32_t icd_version = VK_API_VERSION_1_0;
5439         VkResult icd_result = VK_SUCCESS;
5440         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5441             PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
5442                 (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
5443             if (icd_enumerate_instance_version != NULL) {
5444                 icd_result = icd_enumerate_instance_version(&icd_version);
5445                 if (icd_result != VK_SUCCESS) {
5446                     icd_version = VK_API_VERSION_1_0;
5447                     loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5448                                "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned an error. The ICD "
5449                                "will be treated as a 1.0 ICD",
5450                                icd_term->scanned_icd->lib_name);
5451                 } else if (VK_API_VERSION_MINOR(icd_version) == 0) {
5452                     loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5453                                "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but "
5454                                "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD",
5455                                icd_term->scanned_icd->lib_name);
5456                 }
5457             } else {
5458                 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5459                            "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does "
5460                            "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD",
5461                            icd_term->scanned_icd->lib_name);
5462             }
5463         }
5464 
5465         // Remove the portability enumeration flag bit if the ICD doesn't support the extension
5466         if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) != 0) {
5467             bool supports_portability_enumeration = false;
5468             for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5469                 if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) {
5470                     supports_portability_enumeration = true;
5471                     break;
5472                 }
5473             }
5474             // If the icd supports the extension, use the flags as given, otherwise remove the portability bit
5475             icd_create_info.flags = supports_portability_enumeration
5476                                         ? pCreateInfo->flags
5477                                         : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR);
5478         }
5479 
5480         // Create an instance, lowering the requested API version to the ICD's version if the ICD only supports Vulkan 1.0
5481         VkApplicationInfo icd_app_info;
5482         const uint32_t api_variant = 0;
5483         const uint32_t api_version_1_0 = VK_API_VERSION_1_0;
5484         uint32_t icd_version_nopatch =
5485             VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0);
5486         uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL)
5487                                          ? api_version_1_0
5488                                          : pCreateInfo->pApplicationInfo->apiVersion;
5489         if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) {
5490             if (icd_create_info.pApplicationInfo == NULL) {
5491                 memset(&icd_app_info, 0, sizeof(icd_app_info));
5492             } else {
5493                 memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
5494             }
5495             icd_app_info.apiVersion = icd_version;
5496             icd_create_info.pApplicationInfo = &icd_app_info;
5497         }
5498         icd_result =
5499             ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
5500         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
5501             // If out of memory, bail immediately.
5502             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5503             goto out;
5504         } else if (VK_SUCCESS != icd_result) {
5505             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5506                        "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping "
5507                        "this driver.",
5508                        icd_result, icd_term->scanned_icd->lib_name);
5509             ptr_instance->icd_terms = icd_term->next;
5510             icd_term->next = NULL;
5511             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5512             continue;
5513         }
5514 
5515         if (!loader_icd_init_entries(ptr_instance, icd_term)) {
5516             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5517                        "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.",
5518                        icd_term->scanned_icd->lib_name);
5519             ptr_instance->icd_terms = icd_term->next;
5520             icd_term->next = NULL;
5521             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5522             continue;
5523         }
5524 
5525         if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 &&
5526             (
5527 #if defined(VK_USE_PLATFORM_XLIB_KHR)
5528                 NULL != icd_term->dispatch.CreateXlibSurfaceKHR ||
5529 #endif  // VK_USE_PLATFORM_XLIB_KHR
5530 #if defined(VK_USE_PLATFORM_XCB_KHR)
5531                 NULL != icd_term->dispatch.CreateXcbSurfaceKHR ||
5532 #endif  // VK_USE_PLATFORM_XCB_KHR
5533 #if defined(VK_USE_PLATFORM_WAYLAND_KHR)
5534                 NULL != icd_term->dispatch.CreateWaylandSurfaceKHR ||
5535 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
5536 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
5537                 NULL != icd_term->dispatch.CreateAndroidSurfaceKHR ||
5538 #endif  // VK_USE_PLATFORM_ANDROID_KHR
5539 #if defined(VK_USE_PLATFORM_OHOS)
5540                 NULL != icd_term->dispatch.CreateSurfaceOHOS ||
5541 #endif  // VK_USE_PLATFORM_OHOS
5542 #if defined(VK_USE_PLATFORM_WIN32_KHR)
5543                 NULL != icd_term->dispatch.CreateWin32SurfaceKHR ||
5544 #endif  // VK_USE_PLATFORM_WIN32_KHR
5545                 NULL != icd_term->dispatch.DestroySurfaceKHR)) {
5546             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5547                        "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR"
5548                        " create/destroy entrypoints (Policy #LDP_DRIVER_8)",
5549                        ptr_instance->icd_tramp_list.scanned_list[i].lib_name,
5550                        ptr_instance->icd_tramp_list.scanned_list[i].interface_version);
5551         }
5552 
5553         // If we made it this far, at least one ICD was successful
5554         one_icd_successful = true;
5555     }
5556 
5557     // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the
5558     // instance to have it
5559     if (ptr_instance->supports_get_dev_prop_2) {
5560         bool at_least_one_supports = false;
5561         icd_term = ptr_instance->icd_terms;
5562         while (icd_term != NULL) {
5563             if (icd_term->supports_get_dev_prop_2) {
5564                 at_least_one_supports = true;
5565                 break;
5566             }
5567             icd_term = icd_term->next;
5568         }
5569         if (!at_least_one_supports) {
5570             ptr_instance->supports_get_dev_prop_2 = false;
5571         }
5572     }
5573 
5574     // If no ICDs were added to the instance list and res is unchanged from its initial value, the loader was unable to
5575     // find a suitable ICD.
5576     if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
5577         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5578                    "terminator_CreateInstance: Found no drivers!");
5579         res = VK_ERROR_INCOMPATIBLE_DRIVER;
5580     }
5581 
5582 out:
5583 
5584     ptr_instance->create_terminator_invalid_extension = false;
5585 
5586     if (VK_SUCCESS != res) {
5587         if (VK_ERROR_EXTENSION_NOT_PRESENT == res) {
5588             ptr_instance->create_terminator_invalid_extension = true;
5589         }
5590 
5591         while (NULL != ptr_instance->icd_terms) {
5592             icd_term = ptr_instance->icd_terms;
5593             ptr_instance->icd_terms = icd_term->next;
5594             if (NULL != icd_term->instance) {
5595                 icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
5596             }
5597             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5598         }
5599     } else {
5600         // Check for enabled extensions here to setup the loader structures so the loader knows what extensions
5601         // it needs to worry about.
5602         // We do it here and again above the layers in the trampoline function since the trampoline function
5603         // may think different extensions are enabled than what's down here.
5604         // This is why we don't clear inside of these function calls.
5605         // The clearing should actually be handled by the overall memset of the pInstance structure in the
5606         // trampoline.
5607         wsi_create_instance(ptr_instance, pCreateInfo);
5608         check_for_enabled_debug_extensions(ptr_instance, pCreateInfo);
5609         extensions_create_instance(ptr_instance, pCreateInfo);
5610     }
5611 
5612     return res;
5613 }
5614 
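// terminator_DestroyInstance removes the instance from the loader's global instance list, calls down to each ICD's
// vkDestroyInstance, and then frees the terminator physical device and device group arrays along with the rest of
// the per-instance bookkeeping.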
5615 VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
5616     struct loader_instance *ptr_instance = loader_get_instance(instance);
5617     if (NULL == ptr_instance) {
5618         return;
5619     }
5620     struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
5621     struct loader_icd_term *next_icd_term;
5622 
5623     // Remove this instance from the list of instances:
5624     struct loader_instance *prev = NULL;
5625     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
5626     struct loader_instance *next = loader.instances;
5627     while (next != NULL) {
5628         if (next == ptr_instance) {
5629             // Remove this instance from the list:
5630             if (prev)
5631                 prev->next = next->next;
5632             else
5633                 loader.instances = next->next;
5634             break;
5635         }
5636         prev = next;
5637         next = next->next;
5638     }
5639     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
5640 
5641     while (NULL != icd_terms) {
5642         if (icd_terms->instance) {
5643             icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
5644         }
5645         next_icd_term = icd_terms->next;
5646         icd_terms->instance = VK_NULL_HANDLE;
5647         loader_icd_destroy(ptr_instance, icd_terms, pAllocator);
5648 
5649         icd_terms = next_icd_term;
5650     }
5651 
5652     loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list);
5653     loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
5654     if (NULL != ptr_instance->phys_devs_term) {
5655         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5656             for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) {
5657                 if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) {
5658                     ptr_instance->phys_devs_term[j] = NULL;
5659                 }
5660             }
5661         }
5662         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5663             loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
5664         }
5665         loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
5666     }
5667     if (NULL != ptr_instance->phys_dev_groups_term) {
5668         for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
5669             loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
5670         }
5671         loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
5672     }
5673     loader_free_dev_ext_table(ptr_instance);
5674     loader_free_phys_dev_ext_table(ptr_instance);
5675 
5676     free_string_list(ptr_instance, &ptr_instance->enabled_layer_names);
5677 }
5678 
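// terminator_CreateDevice filters the enabled device extensions down to those the ICD reports, unwraps any
// VkDeviceGroupDeviceCreateInfo in the pNext chain so the ICD sees its own physical device handles, and then calls
// the ICD's vkCreateDevice.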
5679 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
5680                                                        const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
5681     VkResult res = VK_SUCCESS;
5682     struct loader_physical_device_term *phys_dev_term;
5683     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
5684     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
5685 
5686     struct loader_device *dev = (struct loader_device *)*pDevice;
5687     PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
5688     struct loader_extension_list icd_exts;
5689 
5690     VkBaseOutStructure *caller_dgci_container = NULL;
5691     VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL;
5692 
5693     if (NULL == dev) {
5694         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5695                    "terminator_CreateDevice: Loader device pointer null encountered.  Possibly set by active layer. (Policy "
5696                    "#LLP_LAYER_22)");
5697     } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) {
5698         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5699                    "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08lx. The expected value is "
5700                    "0x10ADED040410ADED. Device value possibly "
5701                    "corrupted by active layer (Policy #LLP_LAYER_22).  ",
5702                    dev, dev->loader_dispatch.core_dispatch.magic);
5703     }
5704 
5705     dev->phys_dev_term = phys_dev_term;
5706 
5707     icd_exts.list = NULL;
5708 
5709     if (fpCreateDevice == NULL) {
5710         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5711                    "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name);
5712         res = VK_ERROR_INITIALIZATION_FAILED;
5713         goto out;
5714     }
5715 
5716     VkDeviceCreateInfo localCreateInfo;
5717     memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
5718 
5719     // NOTE: Need to filter the extensions to only those supported by the ICD.
5720     //       No ICD will advertise support for layers. An ICD library could support a layer,
5721     //       but it would be independent of the actual ICD, just in the same library.
5722     char **filtered_extension_names = NULL;
5723     if (0 < pCreateInfo->enabledExtensionCount) {
5724         filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
5725         if (NULL == filtered_extension_names) {
5726             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5727                        "terminator_CreateDevice: Failed to create extension name storage for %d extensions",
5728                        pCreateInfo->enabledExtensionCount);
5729             return VK_ERROR_OUT_OF_HOST_MEMORY;
5730         }
5731     }
5732 
5733     localCreateInfo.enabledLayerCount = 0;
5734     localCreateInfo.ppEnabledLayerNames = NULL;
5735 
5736     localCreateInfo.enabledExtensionCount = 0;
5737     localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5738 
5739     // Get the physical device (ICD) extensions
5740     res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5741     if (VK_SUCCESS != res) {
5742         goto out;
5743     }
5744 
5745     res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
5746                                        phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
5747     if (res != VK_SUCCESS) {
5748         goto out;
5749     }
5750 
5751     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5752         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5753         VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
5754         if (prop) {
5755             filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
5756             localCreateInfo.enabledExtensionCount++;
5757         } else {
5758             loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5759                        "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name,
5760                        icd_term->scanned_icd->lib_name);
5761         }
5762     }
5763 
5764     // Before we continue, if VK_KHR_device_group is in the list of enabled and viable extensions, then we need to look for the
5765     // corresponding VkDeviceGroupDeviceCreateInfo struct in the pNext chain and replace all the physical device values (which
5766     // are really loader physical device terminator values) with the ICD versions.
5767     // if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
5768     {
5769         VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
5770         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
5771         while (NULL != pNext) {
5772             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5773                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5774                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5775                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
5776                     VkPhysicalDevice *phys_dev_array = NULL;
5777                     if (NULL == temp_struct) {
5778                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5779                     }
5780                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
5781                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
5782                     if (NULL == phys_dev_array) {
5783                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5784                     }
5785 
5786                     // Before calling down, replace the incoming physical device values (which are really loader terminator
5787                     // physical devices) with the ICD's physical device values.
5788                     struct loader_physical_device_term *cur_term;
5789                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
5790                         cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
5791                         phys_dev_array[phys_dev] = cur_term->phys_dev;
5792                     }
5793                     temp_struct->pPhysicalDevices = phys_dev_array;
5794 
5795                     // Keep track of pointers to restore pNext chain before returning
5796                     caller_dgci_container = pPrev;
5797                     caller_dgci = cur_struct;
5798 
5799                     // Replace the old struct in the pNext chain with this one.
5800                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
5801                 }
5802                 break;
5803             }
5804 
5805             pPrev = pNext;
5806             pNext = pNext->pNext;
5807         }
5808     }
5809 
5810     // Handle loader emulation for structs that are not supported by the ICD:
5811     // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
5812     // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
5813     // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
5814     // any of the struct types, as the loader would not know the size to allocate and copy.
5815     // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5816     {
5817         const void *pNext = localCreateInfo.pNext;
5818         while (pNext != NULL) {
5819             switch (*(VkStructureType *)pNext) {
5820                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
5821                     const VkPhysicalDeviceFeatures2KHR *features = pNext;
5822 
5823                     if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL &&
5824                         icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5825                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5826                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
5827                                    icd_term->scanned_icd->lib_name);
5828 
5829                         // Verify that VK_KHR_get_physical_device_properties2 is enabled
5830                         if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
5831                             localCreateInfo.pEnabledFeatures = &features->features;
5832                         }
5833                     }
5834 
5835                     // Leave this item in the pNext chain for now
5836 
5837                     pNext = features->pNext;
5838                     break;
5839                 }
5840 
5841                 case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
5842                     const VkDeviceGroupDeviceCreateInfo *group_info = pNext;
5843 
5844                     if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL &&
5845                         icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
5846                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5847                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for "
5848                                    "ICD \"%s\"",
5849                                    icd_term->scanned_icd->lib_name);
5850 
5851                         // The group must contain only this one device, since physical device groups aren't actually supported
5852                         if (group_info->physicalDeviceCount != 1) {
5853                             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5854                                        "vkCreateDevice: Emulation failed to create device from device group info");
5855                             res = VK_ERROR_INITIALIZATION_FAILED;
5856                             goto out;
5857                         }
5858                     }
5859 
5860                     // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec
5861                     // states that the physicalDevice argument must be included in the device group, and we've already checked
5862                     // that it is
5863 
5864                     pNext = group_info->pNext;
5865                     break;
5866                 }
5867 
5868                 // Multiview properties are also allowed, but since VK_KHR_multiview is a device extension, we'll just let the
5869                 // ICD handle any error when the application enables the extension here
5870                 default: {
5871                     const VkBaseInStructure *header = pNext;
5872                     pNext = header->pNext;
5873                     break;
5874                 }
5875             }
5876         }
5877     }
5878 
5879     VkBool32 maintenance5_feature_enabled = false;
5880     // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled
5881     {
5882         const void *pNext = localCreateInfo.pNext;
5883         while (pNext != NULL) {
5884             switch (*(VkStructureType *)pNext) {
5885                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: {
5886                     const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext;
5887                     if (maintenance_features->maintenance5 == VK_TRUE) {
5888                         maintenance5_feature_enabled = true;
5889                     }
5890                     pNext = maintenance_features->pNext;
5891                     break;
5892                 }
5893 
5894                 default: {
5895                     const VkBaseInStructure *header = pNext;
5896                     pNext = header->pNext;
5897                     break;
5898                 }
5899             }
5900         }
5901     }
5902 
5903     // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
5904     // not to return that terminator when vkGetDeviceProcAddr is called
5905     for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
5906         if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
5907             dev->driver_extensions.khr_swapchain_enabled = true;
5908         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
5909             dev->driver_extensions.khr_display_swapchain_enabled = true;
5910         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
5911             dev->driver_extensions.khr_device_group_enabled = true;
5912         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
5913             dev->driver_extensions.ext_debug_marker_enabled = true;
5914         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) {
5915             dev->driver_extensions.ext_full_screen_exclusive_enabled = true;
5916         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) &&
5917                    maintenance5_feature_enabled) {
5918             dev->should_ignore_device_commands_from_newer_version = true;
5919         }
5920     }
5921     dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5922     dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5923 
5924     VkPhysicalDeviceProperties properties;
5925     icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
5926     if (!dev->driver_extensions.khr_device_group_enabled) {
5927         if (properties.apiVersion >= VK_API_VERSION_1_1) {
5928             dev->driver_extensions.khr_device_group_enabled = true;
5929         }
5930     }
5931 
5932     loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5933                "       Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name);
5934 
5935     res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
5936     if (res != VK_SUCCESS) {
5937         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5938                    "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name);
5939         goto out;
5940     }
5941 
5942     *pDevice = dev->icd_device;
5943     loader_add_logical_device(icd_term, dev);
5944 
5945     // Init dispatch pointer in new device object
5946     loader_init_dispatch(*pDevice, &dev->loader_dispatch);
5947 
5948 out:
5949     if (NULL != icd_exts.list) {
5950         loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
5951     }
5952 
5953     // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo
5954     // in the chain to maintain consistency for the caller.
5955     if (caller_dgci_container != NULL) {
5956         caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
5957     }
5958 
5959     return res;
5960 }
5961 
5962 // Update the trampoline physical devices with the wrapped versions.
5963 // We always want to re-use previously returned physical device pointers since the application
5964 // may still be using them.
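// This function builds old-to-new and new-to-old index maps between the existing trampoline list and the incoming
// handles. If the incoming set exactly matches what is already stored, the existing wrappers are reused in place;
// otherwise a new array is allocated, existing wrappers are carried over where possible, and new wrappers are
// allocated only for devices that have not been seen before.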
5965 VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
5966     VkResult res = VK_SUCCESS;
5967     uint32_t found_count = 0;
5968     uint32_t old_count = inst->phys_dev_count_tramp;
5969     uint32_t new_count = inst->total_gpu_count;
5970     struct loader_physical_device_tramp **new_phys_devs = NULL;
5971 
5972     if (0 == phys_dev_count) {
5973         return VK_SUCCESS;
5974     }
5975     if (phys_dev_count > new_count) {
5976         new_count = phys_dev_count;
5977     }
5978 
5979     // We want an old to new index array and a new to old index array
5980     int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
5981     int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
5982     if (NULL == old_to_new_index || NULL == new_to_old_index) {
5983         return VK_ERROR_OUT_OF_HOST_MEMORY;
5984     }
5985 
5986     // Initialize both
5987     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5988         old_to_new_index[cur_idx] = -1;
5989     }
5990     for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
5991         new_to_old_index[cur_idx] = -1;
5992     }
5993 
5994     // Figure out the old->new and new->old indices
5995     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5996         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
5997             if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
5998                 old_to_new_index[cur_idx] = (int32_t)new_idx;
5999                 new_to_old_index[new_idx] = (int32_t)cur_idx;
6000                 found_count++;
6001                 break;
6002             }
6003         }
6004     }
6005 
6006     // If we found exactly as many items as we had before, then everything we already have is good
6007     // enough and we just need to update the array that was passed in with
6008     // the loader values.
6009     if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
6010         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
6011             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6012                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6013                     phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
6014                     break;
6015                 }
6016             }
6017         }
6018         // Nothing else to do for this path
6019         res = VK_SUCCESS;
6020     } else {
6021         // Something is different, so do the full path of checking every device and creating a new array to use.
6022         // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
6023         // have more to store.
6024         new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
6025                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6026         if (NULL == new_phys_devs) {
6027             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6028                        "setup_loader_tramp_phys_devs:  Failed to allocate new physical device array of size %d", new_count);
6029             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6030             goto out;
6031         }
6032 
6033         if (new_count > phys_dev_count) {
6034             found_count = phys_dev_count;
6035         } else {
6036             found_count = new_count;
6037         }
6038 
6039         // First try to see if an old item exists that matches the new item.  If so, just copy it over.
6040         for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6041             bool old_item_found = false;
6042             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6043                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6044                     // Copy over old item to correct spot in the new array
6045                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6046                     old_item_found = true;
6047                     break;
6048                 }
6049             }
6050             // No old item matched, so this device is new; add it to the new list
6051             if (!old_item_found) {
6052                 new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
6053                                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6054                 if (NULL == new_phys_devs[new_idx]) {
6055                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6056                                "setup_loader_tramp_phys_devs:  Failed to allocate new trampoline physical device");
6057                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
6058                     goto out;
6059                 }
6060 
6061                 // Initialize the new physicalDevice object
6062                 loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
6063                 new_phys_devs[new_idx]->this_instance = inst;
6064                 new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
6065                 new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
6066             }
6067 
6068             phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
6069         }
6070 
6071         // We usually get here if the user array is smaller than the total number of devices, so copy the
6072         // remaining devices we have over to the new array.
6073         uint32_t start = found_count;
6074         for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
6075             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6076                 if (old_to_new_index[cur_idx] == -1) {
6077                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6078                     old_to_new_index[cur_idx] = new_idx;
6079                     found_count++;
6080                     break;
6081                 }
6082             }
6083         }
6084     }
6085 
6086 out:
6087 
6088     if (NULL != new_phys_devs) {
6089         if (VK_SUCCESS != res) {
6090             for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6091                 // If an OOM occurred while copying the new physical devices into the new array, that array may already
6092                 // hold some of the old physical device pointers, and freeing those here would lead to them
6093                 // being freed twice. To avoid this we just make sure to not
6094                 // delete physical devices which were copied.
6095                 bool found = false;
6096                 for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
6097                     if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
6098                         found = true;
6099                         break;
6100                     }
6101                 }
6102                 if (!found) {
6103                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6104                 }
6105             }
6106             loader_instance_heap_free(inst, new_phys_devs);
6107         } else {
6108             if (new_count > inst->total_gpu_count) {
6109                 inst->total_gpu_count = new_count;
6110             }
6111             // Free everything in the old array that was not copied into the new array
6112             // here.  We can't attempt to do that before here since the loop running
6113             // before the "out:" label may hit an out of memory condition resulting
6114             // in memory leaking.
6115             if (NULL != inst->phys_devs_tramp) {
6116                 for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
6117                     bool found = false;
6118                     for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
6119                         if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
6120                             found = true;
6121                             break;
6122                         }
6123                     }
6124                     if (!found) {
6125                         loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
6126                     }
6127                 }
6128                 loader_instance_heap_free(inst, inst->phys_devs_tramp);
6129             }
6130             inst->phys_devs_tramp = new_phys_devs;
6131             inst->phys_dev_count_tramp = found_count;
6132         }
6133     }
6134     if (VK_SUCCESS != res) {
6135         inst->total_gpu_count = 0;
6136     }
6137 
6138     return res;
6139 }
6140 
6141 #if defined(LOADER_ENABLE_LINUX_SORT)
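// Linux GPU sorting is used when the instance supports vkGetPhysicalDeviceProperties2. It can be turned off at
// runtime by setting the VK_LOADER_DISABLE_SELECT environment variable to a non-zero value.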
6142 bool is_linux_sort_enabled(struct loader_instance *inst) {
6143     bool sort_items = inst->supports_get_dev_prop_2;
6144     char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst);
6145     if (NULL != env_value) {
6146         int32_t int_env_val = atoi(env_value);
6147         loader_free_getenv(env_value, inst);
6148         if (int_env_val != 0) {
6149             sort_items = false;
6150         }
6151     }
6152     return sort_items;
6153 }
6154 #endif  // LOADER_ENABLE_LINUX_SORT
6155 
6156 // Look for physical_device in the provided phys_devs list. If it is found, return true and write its index into out_idx;
6157 // otherwise return false.
6158 bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs,
6159                    uint32_t *out_idx) {
6160     if (NULL == phys_devs) return false;
6161     for (uint32_t idx = 0; idx < phys_devs_count; idx++) {
6162         if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) {
6163             *out_idx = idx;
6164             return true;
6165         }
6166     }
6167     return false;
6168 }
6169 
6170 // Add physical_device to new_phys_devs
6171 VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device,
6172                                         struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count,
6173                                         struct loader_physical_device_term **new_phys_devs) {
6174     uint32_t out_idx = 0;
6175     uint32_t idx = *cur_new_phys_dev_count;
6176     // Check if the physical_device already exists in the new_phys_devs buffer; that means it was found by both
6177     // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices, and we need to skip it.
6178     if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) {
6179         return VK_SUCCESS;
6180     }
6181     // Check if it was found in a previous call to vkEnumeratePhysicalDevices; if so, we can just copy over the old data.
6182     if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) {
6183         new_phys_devs[idx] = inst->phys_devs_term[out_idx];
6184         (*cur_new_phys_dev_count)++;
6185         return VK_SUCCESS;
6186     }
6187 
6188     // Exit in case something is already present - this shouldn't happen, but it is better to be safe than to overwrite
6189     // existing data, since this code has been refactored a half dozen times.
6190     if (NULL != new_phys_devs[idx]) {
6191         return VK_SUCCESS;
6192     }
6193     // If this physical device is new, we need to allocate space for it.
6194     new_phys_devs[idx] =
6195         loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6196     if (NULL == new_phys_devs[idx]) {
6197         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6198                    "check_and_add_to_new_phys_devs:  Failed to allocate physical device terminator object %d", idx);
6199         return VK_ERROR_OUT_OF_HOST_MEMORY;
6200     }
6201 
6202     loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
6203     new_phys_devs[idx]->this_icd_term = dev_array->icd_term;
6204     new_phys_devs[idx]->icd_index = (uint8_t)(dev_array->icd_index);
6205     new_phys_devs[idx]->phys_dev = physical_device;
6206 
6207     // Increment the count of new physical devices
6208     (*cur_new_phys_dev_count)++;
6209     return VK_SUCCESS;
6210 }
6211 
6212 /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term
6213  *
6214  * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices.
6215  * The latter is supported on Windows only and on drivers supporting ICD Interface Version 6 and greater.
6216  *
6217  * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s.
6218  * They also need to be set up - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data.
6219  * Additionally, we need to keep using already set-up physical devices as they may be in use, thus anything enumerated
6220  * that is already in inst->phys_devs_term will be carried over.
6221  */
6222 
6223 VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
6224     VkResult res = VK_SUCCESS;
6225     struct loader_icd_term *icd_term;
6226     uint32_t icd_idx = 0;
6227     uint32_t windows_sorted_devices_count = 0;
6228     struct loader_icd_physical_devices *windows_sorted_devices_array = NULL;
6229     uint32_t icd_count = 0;
6230     struct loader_icd_physical_devices *icd_phys_dev_array = NULL;
6231     uint32_t new_phys_devs_capacity = 0;
6232     uint32_t new_phys_devs_count = 0;
6233     struct loader_physical_device_term **new_phys_devs = NULL;
6234 
6235 #if defined(_WIN32)
6236     // Get the physical devices supported by platform sorting mechanism into a separate list
6237     res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
6238     if (VK_SUCCESS != res) {
6239         goto out;
6240     }
6241 #endif
6242 
6243     icd_count = inst->total_icd_count;
6244 
6245     // Allocate temporary storage for the physical device info that we read from each ICD.
6246     icd_phys_dev_array =
6247         (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count);
6248     if (NULL == icd_phys_dev_array) {
6249         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6250                    "setup_loader_term_phys_devs:  Failed to allocate temporary ICD Physical device info array of size %d",
6251                    icd_count);
6252         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6253         goto out;
6254     }
6255     memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count);
6256 
6257     // For each ICD, query the number of physical devices, and then retrieve the ICD's handles for those
6258     // physical devices.
6259     icd_term = inst->icd_terms;
6260     while (NULL != icd_term) {
6261         res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
6262         if (VK_SUCCESS != res) {
6263             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6264                        "setup_loader_term_phys_devs:  Call to ICD %d's \'vkEnumeratePhysicalDevices\' failed with error 0x%08x",
6265                        icd_idx, res);
6266             goto out;
6267         }
6268 
6269         icd_phys_dev_array[icd_idx].physical_devices =
6270             (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
6271         if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
6272             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6273                        "setup_loader_term_phys_devs:  Failed to allocate temporary ICD Physical device array for ICD %d of size %d",
6274                        icd_idx, icd_phys_dev_array[icd_idx].device_count);
6275             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6276             goto out;
6277         }
6278 
6279         res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
6280                                                           icd_phys_dev_array[icd_idx].physical_devices);
6281         if (VK_SUCCESS != res) {
6282             goto out;
6283         }
6284         icd_phys_dev_array[icd_idx].icd_term = icd_term;
6285         icd_phys_dev_array[icd_idx].icd_index = icd_idx;
6286         icd_term = icd_term->next;
6287         ++icd_idx;
6288     }
6289 
6290     // Add up both the Windows-sorted and non-Windows physical device counts
6291     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6292         new_phys_devs_capacity += windows_sorted_devices_array[i].device_count;
6293     }
6294     for (uint32_t i = 0; i < icd_count; ++i) {
6295         new_phys_devs_capacity += icd_phys_dev_array[i].device_count;
6296     }
6297 
6298     // Bail out if there are no physical devices reported
6299     if (0 == new_phys_devs_capacity) {
6300         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6301                    "setup_loader_term_phys_devs:  Failed to detect any valid GPUs in the current config");
6302         res = VK_ERROR_INITIALIZATION_FAILED;
6303         goto out;
6304     }
6305 
6306     // Create an allocation large enough to hold both the Windows sorting enumeration and the non-Windows physical device
6307     // enumeration
6308     new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity,
6309                                                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6310     if (NULL == new_phys_devs) {
6311         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6312                    "setup_loader_term_phys_devs:  Failed to allocate new physical device array of size %d", new_phys_devs_capacity);
6313         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6314         goto out;
6315     }
6316 
6317     // Copy over everything found through sorted enumeration
6318     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6319         for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
6320             res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j],
6321                                                  &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs);
6322             if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6323                 goto out;
6324             }
6325         }
6326     }
6327 
6328 // Now go through the rest of the physical devices and add them to new_phys_devs
6329 #if defined(LOADER_ENABLE_LINUX_SORT)
6330 
6331     if (is_linux_sort_enabled(inst)) {
6332         for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) {
6333             new_phys_devs[dev] =
6334                 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6335             if (NULL == new_phys_devs[dev]) {
6336                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6337                            "setup_loader_term_phys_devs:  Failed to allocate physical device terminator object %d", dev);
6338                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6339                 goto out;
6340             }
6341         }
6342 
6343         // Get the physical devices supported by platform sorting mechanism into a separate list
6344         // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
6345         // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
6346         res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count,
6347                                                  &new_phys_devs[new_phys_devs_count]);
6348         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6349             goto out;
6350         }
6351         // Keep previously allocated physical device info since apps may already be using that!
6352         for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) {
6353             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6354                 if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
6355                     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6356                                "Copying old device %u into new device %u", old_idx, new_idx);
                    // Free the newly allocated new_phys_devs entry since we won't use it, then reuse the old info in its place
6358                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6359                     new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
6360                     break;
6361                 }
6362             }
6363         }
        // Now set the count to the capacity, since the list has been completely filled in
6365         new_phys_devs_count = new_phys_devs_capacity;
        // We want the following code to run if Linux sorting is disabled either at compile time or at runtime
6367     } else {
6368 #endif  // LOADER_ENABLE_LINUX_SORT
6369 
6370         // Copy over everything found through the non-sorted means.
6371         for (uint32_t i = 0; i < icd_count; ++i) {
6372             for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
6373                 res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i],
6374                                                      &new_phys_devs_count, new_phys_devs);
6375                 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6376                     goto out;
6377                 }
6378             }
6379         }
6380 #if defined(LOADER_ENABLE_LINUX_SORT)
6381     }
6382 #endif  // LOADER_ENABLE_LINUX_SORT
6383 out:
6384 
6385     if (VK_SUCCESS != res) {
6386         if (NULL != new_phys_devs) {
6387             // We've encountered an error, so we should free the new buffers.
6388             for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
                // We may not have allocated this far, so skip any entry that was never allocated.
6390                 if (new_phys_devs[i] == NULL) continue;
6391 
                // An OOM that occurs while copying the new physical devices into the existing array can leave
                // some of the old physical devices in the new array.  Freeing those here would free them twice,
                // since the old array still references them.  To avoid this, only delete physical devices that
                // were not copied from the old array.
6396                 bool found = false;
6397                 if (NULL != inst->phys_devs_term) {
6398                     for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6399                         if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
6400                             found = true;
6401                             break;
6402                         }
6403                     }
6404                 }
6405                 if (!found) {
6406                     loader_instance_heap_free(inst, new_phys_devs[i]);
6407                 }
6408             }
6409             loader_instance_heap_free(inst, new_phys_devs);
6410         }
6411         inst->total_gpu_count = 0;
6412     } else {
6413         if (NULL != inst->phys_devs_term) {
            // Free everything in the old array that was not copied into the new array.
            // We can't do this any earlier because the loop before the "out:" label may
            // still hit an out-of-memory condition, which would result in a memory leak.
6418             for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
6419                 bool found = false;
6420                 for (uint32_t j = 0; j < new_phys_devs_count; j++) {
6421                     if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
6422                         found = true;
6423                         break;
6424                     }
6425                 }
6426                 if (!found) {
6427                     loader_instance_heap_free(inst, inst->phys_devs_term[i]);
6428                 }
6429             }
6430             loader_instance_heap_free(inst, inst->phys_devs_term);
6431         }
6432 
        // Swap the old device list out for the new one
6434         inst->phys_dev_count_term = new_phys_devs_count;
6435         inst->phys_devs_term = new_phys_devs;
6436         inst->total_gpu_count = new_phys_devs_count;
6437     }
6438 
6439     if (windows_sorted_devices_array != NULL) {
6440         for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6441             if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
6442                 loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
6443             }
6444         }
6445         loader_instance_heap_free(inst, windows_sorted_devices_array);
6446     }
6447 
6448     return res;
6449 }
6450 
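// Convert the VkPhysicalDevice handles inside the given device groups to the loader's trampoline physical device
// handles by flattening the groups, running the handles through setup_loader_tramp_phys_devs, and writing the
// converted handles back into the group structures.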
6451 VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count,
6452                                             VkPhysicalDeviceGroupProperties *groups) {
6453     VkResult res = VK_SUCCESS;
6454     uint32_t cur_idx;
6455     uint32_t dev_idx;
6456 
6457     if (0 == group_count) {
6458         return VK_SUCCESS;
6459     }
6460 
    // Generate a flat list of all the devices in the groups so they can be converted to loader handles
6462     uint32_t phys_dev_count = 0;
6463     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6464         phys_dev_count += groups[cur_idx].physicalDeviceCount;
6465     }
6466     VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count);
6467     if (NULL == devices) {
6468         return VK_ERROR_OUT_OF_HOST_MEMORY;
6469     }
6470 
6471     uint32_t cur_device = 0;
6472     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6473         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6474             devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx];
6475         }
6476     }
6477 
6478     // Update the devices based on the loader physical device values.
6479     res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices);
6480     if (VK_SUCCESS != res) {
6481         return res;
6482     }
6483 
6484     // Update the devices in the group structures now
6485     cur_device = 0;
6486     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6487         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6488             groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++];
6489         }
6490     }
6491 
6492     return res;
6493 }
6494 
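// Terminator for vkEnumeratePhysicalDevices.  Rebuilds the terminator physical device list (it may have changed since
// the last call) and copies out as many loader physical device handles as the caller's array can hold, returning
// VK_INCOMPLETE if the list had to be trimmed.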
6495 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
6496                                                                    VkPhysicalDevice *pPhysicalDevices) {
6497     struct loader_instance *inst = (struct loader_instance *)instance;
6498     VkResult res = VK_SUCCESS;
6499 
    // Always call setup_loader_term_phys_devs because the set of available physical
    // devices may have changed at any point.
6502     res = setup_loader_term_phys_devs(inst);
6503     if (VK_SUCCESS != res) {
6504         goto out;
6505     }
6506 
6507     uint32_t copy_count = inst->phys_dev_count_term;
6508     if (NULL != pPhysicalDevices) {
6509         if (copy_count > *pPhysicalDeviceCount) {
6510             copy_count = *pPhysicalDeviceCount;
6511             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6512                        "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term,
6513                        copy_count);
6514             res = VK_INCOMPLETE;
6515         }
6516 
6517         for (uint32_t i = 0; i < copy_count; i++) {
6518             pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
6519         }
6520     }
6521 
6522     *pPhysicalDeviceCount = copy_count;
6523 
6524 out:
6525 
6526     return res;
6527 }
6528 
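// Terminator for vkEnumerateDeviceExtensionProperties.  When pLayerName names a layer, the extensions are read from
// that layer's manifest.  Otherwise the driver's device extensions are reported, merged with the device extensions
// provided by any active implicit layers.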
6529 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
6530                                                                              const char *pLayerName, uint32_t *pPropertyCount,
6531                                                                              VkExtensionProperties *pProperties) {
6532     if (NULL == pPropertyCount) {
6533         return VK_INCOMPLETE;
6534     }
6535 
6536     struct loader_physical_device_term *phys_dev_term;
6537 
    // Any layer or trampoline wrapping should have been removed at this point, so we can just cast to the expected
    // type for VkPhysicalDevice.
6540     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
6541 
    // If we got here with a non-empty pLayerName, look up the extensions
    // from the layer's JSON manifest
6544     if (pLayerName != NULL && strlen(pLayerName) > 0) {
6545         uint32_t count;
6546         uint32_t copy_size;
6547         const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
6548         struct loader_device_extension_list *dev_ext_list = NULL;
6549         struct loader_device_extension_list local_ext_list;
6550         memset(&local_ext_list, 0, sizeof(local_ext_list));
6551         if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
6552             for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
6553                 struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
6554                 if (strcmp(props->info.layerName, pLayerName) == 0) {
6555                     dev_ext_list = &props->device_extension_list;
6556                 }
6557             }
6558 
6559             count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
6560             if (pProperties == NULL) {
6561                 *pPropertyCount = count;
6562                 loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6563                 return VK_SUCCESS;
6564             }
6565 
6566             copy_size = *pPropertyCount < count ? *pPropertyCount : count;
6567             for (uint32_t i = 0; i < copy_size; i++) {
6568                 memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
6569             }
6570             *pPropertyCount = copy_size;
6571 
6572             loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6573             if (copy_size < count) {
6574                 return VK_INCOMPLETE;
6575             }
6576         } else {
6577             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6578                        "vkEnumerateDeviceExtensionProperties:  pLayerName is too long or is badly formed");
6579             return VK_ERROR_EXTENSION_NOT_PRESENT;
6580         }
6581 
6582         return VK_SUCCESS;
6583     }
6584 
    // The user is querying driver extensions and has supplied their own storage - just fill it out
6586     else if (pProperties) {
6587         struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6588         uint32_t written_count = *pPropertyCount;
6589         VkResult res =
6590             icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties);
6591         if (res != VK_SUCCESS) {
6592             return res;
6593         }
6594 
        // Iterate over the active layers and, for each implicit layer, add its device extensions.
        // After calling into the driver, written_count contains the number of device extensions written, so we can append
        // layer extensions starting at that point in pProperties
6598         for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
6599             struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
6600             if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
6601                 struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
6602                 for (uint32_t j = 0; j < layer_ext_list->count; j++) {
6603                     struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j];
6604                     // look for duplicates
6605                     if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) {
6606                         continue;
6607                     }
6608 
6609                     if (*pPropertyCount <= written_count) {
6610                         return VK_INCOMPLETE;
6611                     }
6612 
6613                     memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties));
6614                     written_count++;
6615                 }
6616             }
6617         }
        // Make sure we update pPropertyCount with how many extensions were actually written
6619         *pPropertyCount = written_count;
6620         return res;
6621     }
    // Use `goto out;` for the rest of this function
6623 
6624     // This case is during the call down the instance chain with pLayerName == NULL and pProperties == NULL
6625     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6626     struct loader_extension_list all_exts = {0};
6627     VkResult res;
6628 
6629     // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
6630     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL);
6631     if (res != VK_SUCCESS) {
6632         goto out;
6633     }
6634     // Then allocate memory to store the physical device extension list + the extensions layers provide
6635     // all_exts.count currently is the number of driver extensions
6636     all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20);
6637     all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
6638     if (NULL == all_exts.list) {
6639         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6640         goto out;
6641     }
6642 
6643     // Get the available device extensions and put them in all_exts.list
6644     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list);
6645     if (res != VK_SUCCESS) {
6646         goto out;
6647     }
6648 
    // Iterate over the active layers and, for each implicit layer, add its device extensions to all_exts.list
6650     for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
6651         struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
6652         if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
6653             struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
6654             for (uint32_t j = 0; j < layer_ext_list->count; j++) {
6655                 res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props);
6656                 if (res != VK_SUCCESS) {
6657                     goto out;
6658                 }
6659             }
6660         }
6661     }
6662 
6663     // Write out the final de-duplicated count to pPropertyCount
6664     *pPropertyCount = all_exts.count;
6665     res = VK_SUCCESS;
6666 
6667 out:
6668 
6669     loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
6670     return res;
6671 }
6672 
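// Validate that a string is well-formed UTF-8 and no longer than max_length.  Returns VK_STRING_ERROR_NULL_PTR for a
// NULL pointer; otherwise returns a bitmask that may contain VK_STRING_ERROR_LENGTH (string too long) and/or
// VK_STRING_ERROR_BAD_DATA (malformed byte sequence), with VK_STRING_ERROR_NONE indicating success.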
6673 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
6674     VkStringErrorFlags result = VK_STRING_ERROR_NONE;
6675     int num_char_bytes = 0;
6676     int i, j;
6677 
6678     if (utf8 == NULL) {
6679         return VK_STRING_ERROR_NULL_PTR;
6680     }
6681 
6682     for (i = 0; i <= max_length; i++) {
6683         if (utf8[i] == 0) {
6684             break;
6685         } else if (i == max_length) {
6686             result |= VK_STRING_ERROR_LENGTH;
6687             break;
6688         } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
6689             num_char_bytes = 0;
6690         } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
6691             num_char_bytes = 1;
6692         } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
6693             num_char_bytes = 2;
6694         } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
6695             num_char_bytes = 3;
6696         } else {
6697             result = VK_STRING_ERROR_BAD_DATA;
6698         }
6699 
6700         // Validate the following num_char_bytes of data
6701         for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
6702             if (++i == max_length) {
6703                 result |= VK_STRING_ERROR_LENGTH;
6704                 break;
6705             }
6706             if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
6707                 result |= VK_STRING_ERROR_BAD_DATA;
6708             }
6709         }
6710     }
6711     return result;
6712 }
6713 
6714 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain,
6715                                                                    uint32_t *pApiVersion) {
6716     (void)chain;
6717     // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
6718     // prefers us crashing.
6719     *pApiVersion = VK_HEADER_VERSION_COMPLETE;
6720     return VK_SUCCESS;
6721 }
6722 
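// Terminator for vkEnumerateInstanceExtensionProperties.  When pLayerName is supplied, the extension list comes from
// that layer's manifest.  Otherwise the instance extensions reported by all ICDs are merged with those provided by
// enabled implicit layers.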
6723 VKAPI_ATTR VkResult VKAPI_CALL
6724 terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
6725                                                 uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
6726     (void)chain;
6727     struct loader_extension_list *global_ext_list = NULL;
6728     struct loader_layer_list instance_layers;
6729     struct loader_extension_list local_ext_list;
6730     struct loader_icd_tramp_list icd_tramp_list;
6731     uint32_t copy_size;
6732     VkResult res = VK_SUCCESS;
6733     struct loader_envvar_all_filters layer_filters = {0};
6734 
6735     memset(&local_ext_list, 0, sizeof(local_ext_list));
6736     memset(&instance_layers, 0, sizeof(instance_layers));
6737     memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
6738 
6739     res = parse_layer_environment_var_filters(NULL, &layer_filters);
6740     if (VK_SUCCESS != res) {
6741         goto out;
6742     }
6743 
6744     // Get layer libraries if needed
6745     if (pLayerName && strlen(pLayerName) != 0) {
6746         if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
6747             assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed");
6748             res = VK_ERROR_EXTENSION_NOT_PRESENT;
6749             goto out;
6750         }
6751 
6752         res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters);
6753         if (VK_SUCCESS != res) {
6754             goto out;
6755         }
6756         for (uint32_t i = 0; i < instance_layers.count; i++) {
6757             struct loader_layer_properties *props = &instance_layers.list[i];
6758             if (strcmp(props->info.layerName, pLayerName) == 0) {
6759                 global_ext_list = &props->instance_extension_list;
6760                 break;
6761             }
6762         }
6763     } else {
6764         // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
6765         loader_preload_icds();
6766 
6767         // Scan/discover all ICD libraries
6768         res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL);
6769         // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
6770         if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6771             goto out;
6772         }
        // Get extensions from all ICDs, merging so there are no duplicates
6774         res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
6775         if (VK_SUCCESS != res) {
6776             goto out;
6777         }
6778         loader_scanned_icd_clear(NULL, &icd_tramp_list);
6779 
6780         // Append enabled implicit layers.
6781         res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters);
6782         if (VK_SUCCESS != res) {
6783             goto out;
6784         }
6785         for (uint32_t i = 0; i < instance_layers.count; i++) {
6786             struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
6787             loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
6788         }
6789 
6790         global_ext_list = &local_ext_list;
6791     }
6792 
6793     if (global_ext_list == NULL) {
6794         res = VK_ERROR_LAYER_NOT_PRESENT;
6795         goto out;
6796     }
6797 
6798     if (pProperties == NULL) {
6799         *pPropertyCount = global_ext_list->count;
6800         goto out;
6801     }
6802 
6803     copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
6804     for (uint32_t i = 0; i < copy_size; i++) {
6805         memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
6806     }
6807     *pPropertyCount = copy_size;
6808 
6809     if (copy_size < global_ext_list->count) {
6810         res = VK_INCOMPLETE;
6811         goto out;
6812     }
6813 
6814 out:
6815     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list);
6816     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
6817     loader_delete_layer_list_and_properties(NULL, &instance_layers);
6818     return res;
6819 }
6820 
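// Terminator for vkEnumerateInstanceLayerProperties.  Scans the system for layer manifests and reports only the
// layers whose loader settings control value leaves them enabled (ON or DEFAULT).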
6821 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain,
6822                                                                            uint32_t *pPropertyCount,
6823                                                                            VkLayerProperties *pProperties) {
6824     (void)chain;
6825     VkResult result = VK_SUCCESS;
6826     struct loader_layer_list instance_layer_list;
6827     struct loader_envvar_all_filters layer_filters = {0};
6828 
6829     LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
6830 
6831     uint32_t copy_size;
6832 
6833     result = parse_layer_environment_var_filters(NULL, &layer_filters);
6834     if (VK_SUCCESS != result) {
6835         goto out;
6836     }
6837 
6838     // Get layer libraries
6839     memset(&instance_layer_list, 0, sizeof(instance_layer_list));
6840     result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters);
6841     if (VK_SUCCESS != result) {
6842         goto out;
6843     }
6844 
6845     uint32_t active_layer_count = 0;
6846     for (uint32_t i = 0; i < instance_layer_list.count; i++) {
6847         if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
6848             instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
6849             active_layer_count++;
6850         }
6851     }
6852 
6853     if (pProperties == NULL) {
6854         *pPropertyCount = active_layer_count;
6855         goto out;
6856     }
6857 
    copy_size = (*pPropertyCount < active_layer_count) ? *pPropertyCount : active_layer_count;
    uint32_t output_properties_index = 0;
    // Walk the full layer list but only copy out enabled layers, stopping once the caller's storage is full
    for (uint32_t i = 0; i < instance_layer_list.count && output_properties_index < copy_size; i++) {
        if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
            instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
            memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
            output_properties_index++;
        }
    }
6867 
6868     *pPropertyCount = copy_size;
6869 
    if (copy_size < active_layer_count) {
        result = VK_INCOMPLETE;
        goto out;
    }
6874 
6875 out:
6876 
6877     loader_delete_layer_list_and_properties(NULL, &instance_layer_list);
6878     return result;
6879 }
6880 
6881 // ---- Vulkan Core 1.1 terminators
6882 
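// Terminator for vkEnumeratePhysicalDeviceGroups and vkEnumeratePhysicalDeviceGroupsKHR.  Queries every ICD for its
// device groups (treating each physical device as its own group when the ICD doesn't support the call), converts the
// returned handles to loader terminator handles, caches the group list in the instance, and copies out as many
// entries as the caller's array can hold.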
6883 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
6884     VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
6885     struct loader_instance *inst = (struct loader_instance *)instance;
6886 
6887     VkResult res = VK_SUCCESS;
6888     struct loader_icd_term *icd_term;
6889     uint32_t total_count = 0;
6890     uint32_t cur_icd_group_count = 0;
6891     VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL;
6892     struct loader_physical_device_group_term *local_phys_dev_groups = NULL;
6893     PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
6894     struct loader_icd_physical_devices *sorted_phys_dev_array = NULL;
6895     uint32_t sorted_count = 0;
6896 
    // For each ICD, query the number of physical device groups it reports so we know how much
    // storage will be needed for the terminator's group list.
6899     icd_term = inst->icd_terms;
6900     for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6901         cur_icd_group_count = 0;
6902 
6903         // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6904         if (inst->enabled_known_extensions.khr_device_group_creation) {
6905             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6906         } else {
6907             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6908         }
6909 
6910         if (NULL == fpEnumeratePhysicalDeviceGroups) {
            // Treat each ICD's GPU as its own group if the extension isn't supported
6912             res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
6913             if (res != VK_SUCCESS) {
6914                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6915                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of \'EnumeratePhysicalDevices\' "
6916                            "to ICD %d to get plain phys dev count.",
6917                            icd_idx);
6918                 continue;
6919             }
6920         } else {
6921             // Query the actual group info
6922             res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
6923             if (res != VK_SUCCESS) {
6924                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6925                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
6926                            "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.",
6927                            icd_idx);
6928                 continue;
6929             }
6930         }
6931         total_count += cur_icd_group_count;
6932     }
6933 
    // If the GPUs haven't been sorted yet, look through them and generate a list of all available GPUs
6935     if (0 == total_count || 0 == inst->total_gpu_count) {
6936         res = setup_loader_term_phys_devs(inst);
6937         if (VK_SUCCESS != res) {
6938             goto out;
6939         }
6940     }
6941 
6942     if (NULL != pPhysicalDeviceGroupProperties) {
6943         // Create an array for the new physical device groups, which will be stored
6944         // in the instance for the Terminator code.
6945         new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
6946             inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6947         if (NULL == new_phys_dev_groups) {
6948             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6949                        "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate new physical device group array of size %d",
6950                        total_count);
6951             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6952             goto out;
6953         }
6954 
        // Create a temporary array (on the stack) to keep track of the
        // physical device group information returned by each ICD.
6957         local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count);
6958         // Initialize the memory to something valid
6959         memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count);
6960 
6961 #if defined(_WIN32)
6962         // Get the physical devices supported by platform sorting mechanism into a separate list
6963         res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array);
6964         if (VK_SUCCESS != res) {
6965             goto out;
6966         }
6967 #endif
6968 
6969         cur_icd_group_count = 0;
6970         icd_term = inst->icd_terms;
6971         for (uint8_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6972             uint32_t count_this_time = total_count - cur_icd_group_count;
6973 
6974             // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6975             if (inst->enabled_known_extensions.khr_device_group_creation) {
6976                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6977             } else {
6978                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6979             }
6980 
6981             if (NULL == fpEnumeratePhysicalDeviceGroups) {
6982                 icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL);
6983 
6984                 VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
6985                 if (NULL == phys_dev_array) {
6986                     loader_log(
6987                         inst, VULKAN_LOADER_ERROR_BIT, 0,
6988                         "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate local physical device array of size %d",
6989                         count_this_time);
6990                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
6991                     goto out;
6992                 }
6993 
6994                 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
6995                 if (res != VK_SUCCESS) {
6996                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6997                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
6998                                "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
6999                                icd_idx);
7000                     goto out;
7001                 }
7002 
                // Add each GPU as its own group
7004                 for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
7005                     uint32_t cur_index = indiv_gpu + cur_icd_group_count;
7006                     local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7007                     local_phys_dev_groups[cur_index].icd_index = icd_idx;
7008                     local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1;
7009                     local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu];
7010                 }
7011 
7012             } else {
7013                 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL);
7014                 if (res != VK_SUCCESS) {
7015                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7016                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7017                                "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group count.",
7018                                icd_idx);
7019                     goto out;
7020                 }
7021                 if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) {
                    // The total so far is still less than the amount of physical device group data passed in
                    // by the caller.  Therefore, we don't have to allocate any temporary structures and we
                    // can just use the data that was passed in.
7025                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time,
7026                                                           &pPhysicalDeviceGroupProperties[cur_icd_group_count]);
7027                     if (res != VK_SUCCESS) {
7028                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7029                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7030                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information.",
7031                                    icd_idx);
7032                         goto out;
7033                     }
7034                     for (uint32_t group = 0; group < count_this_time; ++group) {
7035                         uint32_t cur_index = group + cur_icd_group_count;
7036                         local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index];
7037                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7038                         local_phys_dev_groups[cur_index].icd_index = icd_idx;
7039                     }
7040                 } else {
                    // There's not enough space in the caller's allocated pPhysicalDeviceGroupProperties structs,
                    // so we have to allocate temporary versions to collect all the data.  However, we need to make
                    // sure that at least the ones we do query utilize any pNext data in the caller's version.
7044                     VkPhysicalDeviceGroupProperties *tmp_group_props =
7045                         loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties));
7046                     for (uint32_t group = 0; group < count_this_time; group++) {
7047                         tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
7048                         uint32_t cur_index = group + cur_icd_group_count;
7049                         if (*pPhysicalDeviceGroupCount > cur_index) {
7050                             tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext;
7051                         } else {
7052                             tmp_group_props[group].pNext = NULL;
7053                         }
7054                         tmp_group_props[group].subsetAllocation = false;
7055                     }
7056 
7057                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props);
7058                     if (res != VK_SUCCESS) {
7059                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7060                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7061                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %d  to get group information for temp data.",
7062                                    icd_idx);
7063                         goto out;
7064                     }
7065                     for (uint32_t group = 0; group < count_this_time; ++group) {
7066                         uint32_t cur_index = group + cur_icd_group_count;
7067                         local_phys_dev_groups[cur_index].group_props = tmp_group_props[group];
7068                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7069                         local_phys_dev_groups[cur_index].icd_index = icd_idx;
7070                     }
7071                 }
7072                 if (VK_SUCCESS != res) {
7073                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7074                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7075                                "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.",
7076                                icd_idx);
7077                     goto out;
7078                 }
7079             }
7080 
7081             cur_icd_group_count += count_this_time;
7082         }
7083 
7084 #if defined(LOADER_ENABLE_LINUX_SORT)
7085         if (is_linux_sort_enabled(inst)) {
7086             // Get the physical devices supported by platform sorting mechanism into a separate list
7087             res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups);
7088         }
7089 #elif defined(_WIN32)
7090         // The Windows sorting information is only on physical devices.  We need to take that and convert it to the group
7091         // information if it's present.
7092         if (sorted_count > 0) {
7093             res =
7094                 windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array);
7095         }
7096 #endif  // LOADER_ENABLE_LINUX_SORT
7097 
7098         // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above
7099         // before attempting to do the following.  By verifying that setup_loader_term_phys_devs ran
7100         // first, it guarantees that each physical device will have a loader-specific handle.
7101         if (NULL != inst->phys_devs_term) {
7102             for (uint32_t group = 0; group < total_count; group++) {
7103                 for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount;
7104                      group_gpu++) {
7105                     bool found = false;
7106                     for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
7107                         if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] ==
7108                             inst->phys_devs_term[term_gpu]->phys_dev) {
7109                             local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] =
7110                                 (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
7111                             found = true;
7112                             break;
7113                         }
7114                     }
7115                     if (!found) {
7116                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7117                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed to find GPU %d in group %d returned by "
7118                                    "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'",
7119                                    group_gpu, group);
7120                         res = VK_ERROR_INITIALIZATION_FAILED;
7121                         goto out;
7122                     }
7123                 }
7124             }
7125         }
7126 
7127         uint32_t idx = 0;
7128 
7129         // Copy or create everything to fill the new array of physical device groups
7130         for (uint32_t group = 0; group < total_count; group++) {
7131             // Skip groups which have been included through sorting
7132             if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) {
7133                 continue;
7134             }
7135 
7136             // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
7137             VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props;
7138 
7139             // Check if this physical device group with the same contents is already in the old buffer
7140             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7141                 if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] &&
7142                     group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
7143                     bool found_all_gpus = true;
7144                     for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
7145                         bool found_gpu = false;
7146                         for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
7147                             if (group_properties->physicalDevices[new_gpu] ==
7148                                 inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
7149                                 found_gpu = true;
7150                                 break;
7151                             }
7152                         }
7153 
7154                         if (!found_gpu) {
7155                             found_all_gpus = false;
7156                             break;
7157                         }
7158                     }
7159                     if (!found_all_gpus) {
7160                         continue;
7161                     } else {
7162                         new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
7163                         break;
7164                     }
7165                 }
7166             }
7167             // If this physical device group isn't in the old buffer, create it
7168             if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
7169                 new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc(
7170                     inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
7171                 if (NULL == new_phys_dev_groups[idx]) {
7172                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7173                                "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate physical device group Terminator "
7174                                "object %d",
7175                                idx);
7176                     total_count = idx;
7177                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
7178                     goto out;
7179                 }
7180                 memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties));
7181             }
7182 
7183             ++idx;
7184         }
7185     }
7186 
7187 out:
7188 
7189     if (NULL != pPhysicalDeviceGroupProperties) {
7190         if (VK_SUCCESS != res) {
7191             if (NULL != new_phys_dev_groups) {
7192                 // We've encountered an error, so we should free the new buffers.
7193                 for (uint32_t i = 0; i < total_count; i++) {
                    // An OOM that occurs while copying the new physical device groups into the existing array can
                    // leave some of the old physical device groups in the new array.  Freeing those here would free
                    // them twice, since the old array still references them.  To avoid this, only delete physical
                    // device groups that were not copied from the old array.
7198                     bool found = false;
7199                     if (NULL != inst->phys_devs_term) {
7200                         for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7201                             if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) {
7202                                 found = true;
7203                                 break;
7204                             }
7205                         }
7206                     }
7207                     if (!found) {
7208                         loader_instance_heap_free(inst, new_phys_dev_groups[i]);
7209                     }
7210                 }
7211                 loader_instance_heap_free(inst, new_phys_dev_groups);
7212             }
7213         } else {
7214             if (NULL != inst->phys_dev_groups_term) {
                // Free everything in the old array that was not copied into the new array.
                // We can't do this any earlier because the loop before the "out:" label may
                // still hit an out-of-memory condition, which would result in a memory leak.
7219                 for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
7220                     bool found = false;
7221                     for (uint32_t j = 0; j < total_count; j++) {
7222                         if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
7223                             found = true;
7224                             break;
7225                         }
7226                     }
7227                     if (!found) {
7228                         loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
7229                     }
7230                 }
7231                 loader_instance_heap_free(inst, inst->phys_dev_groups_term);
7232             }
7233 
7234             // Swap in the new physical device group list
7235             inst->phys_dev_group_count_term = total_count;
7236             inst->phys_dev_groups_term = new_phys_dev_groups;
7237         }
7238 
7239         if (sorted_phys_dev_array != NULL) {
7240             for (uint32_t i = 0; i < sorted_count; ++i) {
7241                 if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
7242                     loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
7243                 }
7244             }
7245             loader_instance_heap_free(inst, sorted_phys_dev_array);
7246         }
7247 
7248         uint32_t copy_count = inst->phys_dev_group_count_term;
7249         if (NULL != pPhysicalDeviceGroupProperties) {
7250             if (copy_count > *pPhysicalDeviceGroupCount) {
7251                 copy_count = *pPhysicalDeviceGroupCount;
7252                 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
7253                            "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.",
7254                            inst->phys_dev_group_count_term, copy_count);
7255                 res = VK_INCOMPLETE;
7256             }
7257 
7258             for (uint32_t i = 0; i < copy_count; i++) {
7259                 memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties));
7260             }
7261         }
7262 
7263         *pPhysicalDeviceGroupCount = copy_count;
7264 
7265     } else {
7266         *pPhysicalDeviceGroupCount = total_count;
7267     }
7268     return res;
7269 }
7270