1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2017-2019 The Khronos Group Inc.
6  * Copyright (c) 2018-2020 NVIDIA Corporation
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *	  http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan robustness2 tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktRobustnessExtsTests.hpp"
26 
27 #include "vkBufferWithMemory.hpp"
28 #include "vkImageWithMemory.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkQueryUtil.hpp"
31 #include "vkDeviceUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkObjUtil.hpp"
36 #include "vkBarrierUtil.hpp"
37 #include "vktRobustnessUtil.hpp"
38 
39 #include "vktTestGroupUtil.hpp"
40 #include "vktTestCase.hpp"
41 
42 #include "deDefs.h"
43 #include "deMath.h"
44 #include "deRandom.h"
45 #include "deSharedPtr.hpp"
46 #include "deString.h"
47 
48 #include "tcuVectorType.hpp"
49 #include "tcuTestCase.hpp"
50 #include "tcuTestLog.hpp"
51 #include "tcuImageCompare.hpp"
52 
53 #include <string>
54 #include <sstream>
55 #include <algorithm>
56 #include <limits>
57 
58 namespace vkt
59 {
60 namespace robustness
61 {
62 namespace
63 {
64 using namespace vk;
65 using namespace std;
66 using de::SharedPtr;
67 using BufferWithMemoryPtr = de::MovePtr<BufferWithMemory>;
68 
69 enum RobustnessFeatureBits
70 {
71 	RF_IMG_ROBUSTNESS		= (1		),
72 	RF_ROBUSTNESS2			= (1 << 1	),
73 	RF_PIPELINE_ROBUSTNESS	= (1 << 2	),
74 };
75 
76 using RobustnessFeatures = deUint32;
77 
78 // Class to wrap a singleton device with the indicated robustness features.
79 template <RobustnessFeatures FEATURES>
80 class SingletonDevice
81 {
SingletonDevice(Context& context)82 	SingletonDevice	(Context& context)
83 		: m_context(context)
84 		, m_logicalDevice()
85 	{
86 		// Note we are already checking the needed features are available in checkSupport().
87 		VkPhysicalDeviceExtendedDynamicStateFeaturesEXT		edsFeatures						= initVulkanStructure();
88 		VkPhysicalDeviceScalarBlockLayoutFeatures			scalarBlockLayoutFeatures		= initVulkanStructure();
89 		VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT	shaderImageAtomicInt64Features	= initVulkanStructure();
90 		VkPhysicalDeviceBufferDeviceAddressFeatures			bufferDeviceAddressFeatures		= initVulkanStructure();
91 		VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
92 		VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
93 #ifndef CTS_USES_VULKANSC
94 		VkPhysicalDeviceRayTracingPipelineFeaturesKHR		rayTracingPipelineFeatures		= initVulkanStructure();
95 		VkPhysicalDeviceAccelerationStructureFeaturesKHR	accelerationStructureFeatures	= initVulkanStructure();
96 		VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures		= initVulkanStructure();
97 		VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT	gplFeatures						= initVulkanStructure();
98 #endif // CTS_USES_VULKANSC
99 		VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();
100 
101 		const auto addFeatures = makeStructChainAdder(&features2);
102 
103 		// Enable these ones if supported, as they're needed in some tests.
104 		if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
105 			addFeatures(&edsFeatures);
106 
107 		if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
108 			addFeatures(&scalarBlockLayoutFeatures);
109 
110 		if (context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"))
111 			addFeatures(&shaderImageAtomicInt64Features);
112 
113 #ifndef CTS_USES_VULKANSC
114 		if (context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
115 		{
116 			addFeatures(&accelerationStructureFeatures);
117 			addFeatures(&rayTracingPipelineFeatures);
118 		}
119 
120 		if (context.isDeviceFunctionalitySupported("VK_EXT_graphics_pipeline_library"))
121 			addFeatures(&gplFeatures);
122 #endif // CTS_USES_VULKANSC
123 
124 		if (context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
125 			addFeatures(&bufferDeviceAddressFeatures);
126 
127 		if (FEATURES & RF_IMG_ROBUSTNESS)
128 		{
129 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
130 
131 			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
132 				addFeatures(&imageRobustnessFeatures);
133 		}
134 
135 		if (FEATURES & RF_ROBUSTNESS2)
136 		{
137 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
138 
139 			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
140 				addFeatures(&robustness2Features);
141 		}
142 
143 #ifndef CTS_USES_VULKANSC
144 		if (FEATURES & RF_PIPELINE_ROBUSTNESS)
145 			addFeatures(&pipelineRobustnessFeatures);
146 #endif
147 
148 		const auto&	vki				= m_context.getInstanceInterface();
149 		const auto	instance		= m_context.getInstance();
150 		const auto	physicalDevice	= chooseDevice(vki, instance, context.getTestContext().getCommandLine());
151 
152 		vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
153 
154 #ifndef CTS_USES_VULKANSC
155 		if (FEATURES & RF_PIPELINE_ROBUSTNESS)
156 			features2.features.robustBufferAccess = VK_FALSE;
157 #endif
158 		m_logicalDevice = createRobustBufferAccessDevice(context, &features2);
159 
160 #ifndef CTS_USES_VULKANSC
161 		m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance, *m_logicalDevice, context.getUsedApiVersion()));
162 #else
163 		m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(), context.getUsedApiVersion()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
164 #endif // CTS_USES_VULKANSC
165 	}
166 
167 public:
~SingletonDevice()168 	~SingletonDevice()
169 	{
170 	}
171 
getDevice(Context& context)172 	static VkDevice getDevice(Context& context)
173 	{
174 		if (!m_singletonDevice)
175 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
176 		DE_ASSERT(m_singletonDevice);
177 		return m_singletonDevice->m_logicalDevice.get();
178 	}
getDeviceInterface(Context& context)179 	static const DeviceInterface& getDeviceInterface(Context& context)
180 	{
181 		if (!m_singletonDevice)
182 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
183 		DE_ASSERT(m_singletonDevice);
184 		return *(m_singletonDevice->m_deviceDriver.get());
185 	}
186 
destroy()187 	static void destroy()
188 	{
189 		m_singletonDevice.clear();
190 	}
191 
192 private:
193 	const Context&								m_context;
194 	Move<vk::VkDevice>							m_logicalDevice;
195 #ifndef CTS_USES_VULKANSC
196 	de::MovePtr<vk::DeviceDriver>				m_deviceDriver;
197 #else
198 	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	m_deviceDriver;
199 #endif // CTS_USES_VULKANSC
200 
201 	static SharedPtr<SingletonDevice<FEATURES>>	m_singletonDevice;
202 };
203 
204 template <RobustnessFeatures FEATURES>
205 SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
206 
207 using ImageRobustnessSingleton	= SingletonDevice<RF_IMG_ROBUSTNESS>;
208 using Robustness2Singleton		= SingletonDevice<RF_ROBUSTNESS2>;
209 
210 using PipelineRobustnessImageRobustnessSingleton	= SingletonDevice<RF_IMG_ROBUSTNESS | RF_PIPELINE_ROBUSTNESS>;
211 using PipelineRobustnessRobustness2Singleton		= SingletonDevice<RF_ROBUSTNESS2 | RF_PIPELINE_ROBUSTNESS>;
212 
213 // Render target / compute grid dimensions
214 static const deUint32 DIM = 8;
215 
216 // treated as a phony VkDescriptorType value
217 #define VERTEX_ATTRIBUTE_FETCH 999
218 
219 typedef enum
220 {
221 	STAGE_COMPUTE = 0,
222 	STAGE_VERTEX,
223 	STAGE_FRAGMENT,
224 	STAGE_RAYGEN
225 } Stage;
226 
227 enum class PipelineRobustnessCase
228 {
229 	DISABLED = 0,
230 	ENABLED_MONOLITHIC,
231 	ENABLED_FAST_GPL,
232 	ENABLED_OPTIMIZED_GPL,
233 };
234 
getConstructionTypeFromRobustnessCase(PipelineRobustnessCase prCase)235 PipelineConstructionType getConstructionTypeFromRobustnessCase (PipelineRobustnessCase prCase)
236 {
237 	if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
238 		return PIPELINE_CONSTRUCTION_TYPE_FAST_LINKED_LIBRARY;
239 	if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
240 		return PIPELINE_CONSTRUCTION_TYPE_LINK_TIME_OPTIMIZED_LIBRARY;
241 	return PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC;
242 }
243 
244 struct CaseDef
245 {
246 	VkFormat format;
247 	Stage stage;
248 	VkFlags allShaderStages;
249 	VkFlags allPipelineStages;
250 	int/*VkDescriptorType*/ descriptorType;
251 	VkImageViewType viewType;
252 	VkSampleCountFlagBits samples;
253 	int bufferLen;
254 	bool unroll;
255 	bool vol;
256 	bool nullDescriptor;
257 	bool useTemplate;
258 	bool formatQualifier;
259 	bool pushDescriptor;
260 	bool testRobustness2;
261 	PipelineRobustnessCase pipelineRobustnessCase;
262 	deUint32 imageDim[3]; // width, height, depth or layers
263 	bool readOnly;
264 
needsScalarBlockLayoutvkt::robustness::__anon28606::CaseDef265 	bool needsScalarBlockLayout() const
266 	{
267 		bool scalarNeeded = false;
268 
269 		switch (descriptorType)
270 		{
271 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
272 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
273 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
274 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
275 			scalarNeeded = true;
276 			break;
277 		default:
278 			scalarNeeded = false;
279 			break;
280 		}
281 
282 		return scalarNeeded;
283 	}
284 
needsPipelineRobustnessvkt::robustness::__anon28606::CaseDef285 	bool needsPipelineRobustness (void) const
286 	{
287 		return (pipelineRobustnessCase != PipelineRobustnessCase::DISABLED);
288 	}
289 };
290 
formatIsR64(const VkFormat& f)291 static bool formatIsR64(const VkFormat& f)
292 {
293 	switch (f)
294 	{
295 	case VK_FORMAT_R64_SINT:
296 	case VK_FORMAT_R64_UINT:
297 		return true;
298 	default:
299 		return false;
300 	}
301 }
302 
303 // Returns the appropriate singleton device for the given case.
getLogicalDevice(Context& ctx, const bool testRobustness2, const bool testPipelineRobustness)304 VkDevice getLogicalDevice (Context& ctx, const bool testRobustness2, const bool testPipelineRobustness)
305 {
306 	if (testPipelineRobustness)
307 	{
308 		if (testRobustness2)
309 			return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
310 		return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);
311 	}
312 
313 	if (testRobustness2)
314 		return Robustness2Singleton::getDevice(ctx);
315 	return ImageRobustnessSingleton::getDevice(ctx);
316 }
317 
318 // Returns the appropriate singleton device driver for the given case.
getDeviceInterface(Context& ctx, const bool testRobustness2, const bool testPipelineRobustness)319 const DeviceInterface& getDeviceInterface(Context& ctx, const bool testRobustness2, const bool testPipelineRobustness)
320 {
321 	if (testPipelineRobustness)
322 	{
323 		if (testRobustness2)
324 			return PipelineRobustnessRobustness2Singleton::getDeviceInterface(ctx);
325 		return PipelineRobustnessImageRobustnessSingleton::getDeviceInterface(ctx);
326 	}
327 
328 	if (testRobustness2)
329 		return Robustness2Singleton::getDeviceInterface(ctx);
330 	return ImageRobustnessSingleton::getDeviceInterface(ctx);
331 }
332 
333 
334 class Layout
335 {
336 public:
337 	vector<VkDescriptorSetLayoutBinding> layoutBindings;
338 	vector<deUint8> refData;
339 };
340 
341 
342 class RobustnessExtsTestInstance : public TestInstance
343 {
344 public:
345 						RobustnessExtsTestInstance		(Context& context, const CaseDef& data);
346 						~RobustnessExtsTestInstance	(void);
347 	tcu::TestStatus		iterate								(void);
348 private:
349 	CaseDef				m_data;
350 };
351 
RobustnessExtsTestInstance(Context& context, const CaseDef& data)352 RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
353 	: vkt::TestInstance		(context)
354 	, m_data				(data)
355 {
356 }
357 
~RobustnessExtsTestInstance(void)358 RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
359 {
360 }
361 
362 class RobustnessExtsTestCase : public TestCase
363 {
364 	public:
365 								RobustnessExtsTestCase		(tcu::TestContext& context, const std::string& name, const CaseDef data);
366 								~RobustnessExtsTestCase	(void);
367 	virtual	void				initPrograms					(SourceCollections& programCollection) const;
368 	virtual TestInstance*		createInstance					(Context& context) const;
369 	virtual void				checkSupport					(Context& context) const;
370 
371 private:
372 	CaseDef					m_data;
373 };
374 
RobustnessExtsTestCase(tcu::TestContext& context, const std::string& name, const CaseDef data)375 RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const std::string& name, const CaseDef data)
376 	: vkt::TestCase	(context, name)
377 	, m_data		(data)
378 {
379 }
380 
~RobustnessExtsTestCase(void)381 RobustnessExtsTestCase::~RobustnessExtsTestCase	(void)
382 {
383 }
384 
formatIsFloat(const VkFormat& f)385 static bool formatIsFloat(const VkFormat& f)
386 {
387 	switch (f)
388 	{
389 	case VK_FORMAT_R32_SFLOAT:
390 	case VK_FORMAT_R32G32_SFLOAT:
391 	case VK_FORMAT_R32G32B32A32_SFLOAT:
392 		return true;
393 	default:
394 		return false;
395 	}
396 }
397 
formatIsSignedInt(const VkFormat& f)398 static bool formatIsSignedInt(const VkFormat& f)
399 {
400 	switch (f)
401 	{
402 	case VK_FORMAT_R32_SINT:
403 	case VK_FORMAT_R64_SINT:
404 	case VK_FORMAT_R32G32_SINT:
405 	case VK_FORMAT_R32G32B32A32_SINT:
406 		return true;
407 	default:
408 		return false;
409 	}
410 }
411 
supportsStores(int descriptorType)412 static bool supportsStores(int descriptorType)
413 {
414 	switch (descriptorType)
415 	{
416 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
417 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
418 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
419 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
420 		return true;
421 	default:
422 		return false;
423 	}
424 }
425 
426 #ifndef CTS_USES_VULKANSC
getPipelineRobustnessInfo(bool robustness2, int descriptorType)427 static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
428 {
429 	VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();
430 	robustnessCreateInfo.storageBuffers	= VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
431 	robustnessCreateInfo.uniformBuffers	= VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
432 	robustnessCreateInfo.vertexInputs	= VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
433 	robustnessCreateInfo.images			= VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;
434 
435 	switch (descriptorType)
436 	{
437 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
438 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
439 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
440 			robustnessCreateInfo.storageBuffers	= (robustness2
441 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
442 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
443 			break;
444 
445 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
446 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
447 			robustnessCreateInfo.images	= (robustness2
448 										? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT
449 										: VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);
450 			break;
451 
452 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
453 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
454 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
455 			robustnessCreateInfo.uniformBuffers	= (robustness2
456 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
457 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
458 			break;
459 
460 		case VERTEX_ATTRIBUTE_FETCH:
461 			robustnessCreateInfo.vertexInputs	= (robustness2
462 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
463 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
464 			break;
465 
466 		default:
467 			DE_ASSERT(0);
468 	}
469 
470 	return robustnessCreateInfo;
471 }
472 #endif
473 
checkSupport(Context& context) const474 void RobustnessExtsTestCase::checkSupport(Context& context) const
475 {
476 	const auto&	vki				= context.getInstanceInterface();
477 	const auto	physicalDevice	= context.getPhysicalDevice();
478 
479 	checkPipelineConstructionRequirements(vki, physicalDevice, getConstructionTypeFromRobustnessCase(m_data.pipelineRobustnessCase));
480 
481 	// We need to query some features using the physical device instead of using the reported context features because robustness2
482 	// and image robustness are always disabled in the default device but they may be available.
483 	VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
484 	VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
485 	VkPhysicalDeviceScalarBlockLayoutFeatures			scalarLayoutFeatures			= initVulkanStructure();
486 	VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();
487 
488 	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
489 	const auto addFeatures = makeStructChainAdder(&features2);
490 
491 	if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
492 		addFeatures(&scalarLayoutFeatures);
493 
494 	if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
495 		addFeatures(&imageRobustnessFeatures);
496 
497 	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
498 		addFeatures(&robustness2Features);
499 
500 #ifndef CTS_USES_VULKANSC
501 	VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures = initVulkanStructure();
502 	if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
503 		addFeatures(&pipelineRobustnessFeatures);
504 #endif
505 
506 	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
507 	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
508 
509 	if (formatIsR64(m_data.format))
510 	{
511 		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
512 
513 		VkFormatProperties formatProperties;
514 		vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);
515 
516 #ifndef CTS_USES_VULKANSC
517 		const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
518 #endif // CTS_USES_VULKANSC
519 
520 		switch (m_data.descriptorType)
521 		{
522 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
523 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
524 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
525 			break;
526 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
527 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
528 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
529 #ifndef CTS_USES_VULKANSC
530 			if ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) != VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
531 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
532 #endif // CTS_USES_VULKANSC
533 			break;
534 		case VERTEX_ATTRIBUTE_FETCH:
535 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
536 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
537 			break;
538 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
539 			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
540 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
541 			break;
542 		default: DE_ASSERT(true);
543 		}
544 
545 		if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
546 		{
547 			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
548 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
549 		}
550 	}
551 
552 	// Check needed properties and features
553 	if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
554 		TCU_THROW(NotSupportedError, "Scalar block layout not supported");
555 
556 	if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
557 		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
558 
559 	if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
560 		TCU_THROW(NotSupportedError, "Fragment shader stores not supported");
561 
562 	if (m_data.stage == STAGE_RAYGEN)
563 		context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
564 
565 	switch (m_data.descriptorType)
566 	{
567 	default: DE_ASSERT(0); // Fallthrough
568 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
569 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
570 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
571 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
572 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
573 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
574 	case VERTEX_ATTRIBUTE_FETCH:
575 		if (m_data.testRobustness2)
576 		{
577 			if (!robustness2Features.robustBufferAccess2)
578 				TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
579 		}
580 		else
581 		{
582 			// This case is not tested here.
583 			DE_ASSERT(false);
584 		}
585 		break;
586 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
587 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
588 		if (m_data.testRobustness2)
589 		{
590 			if (!robustness2Features.robustImageAccess2)
591 				TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
592 		}
593 		else
594 		{
595 			if (!imageRobustnessFeatures.robustImageAccess)
596 				TCU_THROW(NotSupportedError, "robustImageAccess not supported");
597 		}
598 		break;
599 	}
600 
601 	if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
602 		TCU_THROW(NotSupportedError, "nullDescriptor not supported");
603 
604 	// The fill shader for 64-bit multisample image tests uses a storage image.
605 	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
606 		!features2.features.shaderStorageImageMultisample)
607 		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
608 
609 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
610 		m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
611 		!features2.features.shaderStorageImageMultisample)
612 		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
613 
614 	if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
615 		TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");
616 
617 #ifndef CTS_USES_VULKANSC
618 	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !m_data.formatQualifier)
619 	{
620 		const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
621 		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
622 			TCU_THROW(NotSupportedError, "Format does not support reading without format");
623 		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
624 			TCU_THROW(NotSupportedError, "Format does not support writing without format");
625 	}
626 	else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !m_data.formatQualifier)
627 	{
628 		const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
629 		if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
630 			TCU_THROW(NotSupportedError, "Format does not support reading without format");
631 		if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
632 			TCU_THROW(NotSupportedError, "Format does not support writing without format");
633 	}
634 #else
635 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
636 		!m_data.formatQualifier &&
637 		(!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
638 		TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
639 #endif // CTS_USES_VULKANSC
640 
641 	if (m_data.pushDescriptor)
642 		context.requireDeviceFunctionality("VK_KHR_push_descriptor");
643 
644 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
645 		TCU_THROW(NotSupportedError, "Cube array image view type not supported");
646 
647 	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
648 		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
649 
650 #ifndef CTS_USES_VULKANSC
651 	if (m_data.needsPipelineRobustness() && !pipelineRobustnessFeatures.pipelineRobustness)
652 		TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
653 #endif
654 }
655 
generateLayout(Layout &layout, const CaseDef &caseDef)656 void generateLayout(Layout &layout, const CaseDef &caseDef)
657 {
658 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
659 	int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
660 	bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
661 
662 	for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
663 	{
664 		VkDescriptorSetLayoutBinding &binding = bindings[b];
665 		binding.binding = b;
666 		binding.pImmutableSamplers = NULL;
667 		binding.stageFlags = caseDef.allShaderStages;
668 		binding.descriptorCount = 1;
669 
670 		// Output image
671 		if (b == 0)
672 			binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
673 		else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
674 			binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
675 	}
676 
677 	if (caseDef.nullDescriptor)
678 		return;
679 
680 	if (caseDef.bufferLen == 0)
681 	{
682 		// Clear color values for image tests
683 		static deUint32 urefData[4]		= { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
684 		static deUint64 urefData64[4]	= { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
685 		static float frefData[4]		= { 123.f, 234.f, 345.f, 456.f };
686 
687 		if (formatIsR64(caseDef.format))
688 		{
689 			layout.refData.resize(32);
690 			deUint64 *ptr = (deUint64 *)layout.refData.data();
691 
692 			for (unsigned int i = 0; i < 4; ++i)
693 			{
694 				ptr[i] = urefData64[i];
695 			}
696 		}
697 		else
698 		{
699 			layout.refData.resize(16);
700 			deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
701 		}
702 	}
703 	else
704 	{
705 		layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7: ~3));
706 		for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
707 		{
708 			if (formatIsFloat(caseDef.format))
709 			{
710 				float *f = (float *)layout.refData.data() + i;
711 				*f = 2.0f*(float)i + 3.0f;
712 			}
713 			if (formatIsR64(caseDef.format))
714 			{
715 				deUint64 *u = (deUint64 *)layout.refData.data() + i;
716 				*u = 2 * i + 3;
717 			}
718 			else
719 			{
720 				int *u = (int *)layout.refData.data() + i;
721 				*u = 2*i + 3;
722 			}
723 		}
724 	}
725 }
726 
genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)727 static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
728 {
729 	std::stringstream s;
730 	// Fetch from the descriptor.
731 	switch (caseDef.descriptorType)
732 	{
733 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
734 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
735 		s << vecType << "(ubo0_1.val[" << coord << "]";
736 		for (int i = numComponents; i < 4; ++i) s << ", 0";
737 		s << ")";
738 		break;
739 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
740 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
741 		s << vecType << "(ssbo0_1.val[" << coord << "]";
742 		for (int i = numComponents; i < 4; ++i) s << ", 0";
743 		s << ")";
744 		break;
745 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
746 		s << "texelFetch(texbo0_1, " << coord << ")";
747 		break;
748 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
749 		s << "imageLoad(image0_1, " << coord << ")";
750 		break;
751 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
752 		if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
753 			s << "texelFetch(texture0_1, " << coord << ")";
754 		else
755 			s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
756 		break;
757 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
758 		s << "imageLoad(image0_1, " << coord << ")";
759 		break;
760 	case VERTEX_ATTRIBUTE_FETCH:
761 		s << "attr";
762 		break;
763 	default: DE_ASSERT(0);
764 	}
765 	return s.str();
766 }
767 
768 static const int storeValue = 123;
769 
770 // Get the value stored by genStore.
getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)771 static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
772 {
773 	std::stringstream s;
774 	switch (descriptorType)
775 	{
776 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
777 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
778 		s << vecType  << "(" << bufType << "(" << storeValue << ")";
779 		for (int i = numComponents; i < 4; ++i) s << ", 0";
780 		s << ")";
781 		break;
782 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
783 		s << vecType << "(" << storeValue << ")";
784 		break;
785 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
786 		s << vecType << "(" << storeValue << ")";
787 		break;
788 	default: DE_ASSERT(0);
789 	}
790 	return s.str();
791 }
792 
genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)793 static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
794 {
795 	std::stringstream s;
796 	// Store to the descriptor.
797 	switch (descriptorType)
798 	{
799 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
800 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
801 		s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
802 		break;
803 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
804 		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
805 		break;
806 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
807 		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
808 		break;
809 	default: DE_ASSERT(0);
810 	}
811 	return s.str();
812 }
813 
genAtomic(int descriptorType, const string& bufType, const string& coord)814 static string genAtomic(int descriptorType, const string& bufType, const string& coord)
815 {
816 	std::stringstream s;
817 	// Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
818 	switch (descriptorType)
819 	{
820 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
821 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
822 		s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
823 		break;
824 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
825 		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
826 		break;
827 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
828 		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
829 		break;
830 	default: DE_ASSERT(0);
831 	}
832 	return s.str();
833 }
834 
getShaderImageFormatQualifier(const tcu::TextureFormat& format)835 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
836 {
837 	const char* orderPart;
838 	const char* typePart;
839 
840 	switch (format.order)
841 	{
842 		case tcu::TextureFormat::R:		orderPart = "r";	break;
843 		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
844 		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
845 		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;
846 
847 		default:
848 			DE_FATAL("Impossible");
849 			orderPart = DE_NULL;
850 	}
851 
852 	switch (format.type)
853 	{
854 		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
855 		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;
856 
857 		case tcu::TextureFormat::UNSIGNED_INT64:	typePart = "64ui";		break;
858 		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
859 		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
860 		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;
861 
862 		case tcu::TextureFormat::SIGNED_INT64:		typePart = "64i";		break;
863 		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
864 		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
865 		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;
866 
867 		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
868 		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;
869 
870 		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
871 		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;
872 
873 		default:
874 			DE_FATAL("Impossible");
875 			typePart = DE_NULL;
876 	}
877 
878 	return std::string() + orderPart + typePart;
879 }
880 
genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)881 string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
882 {
883 	if (numCoords == 1)
884 		return c;
885 
886 	if (samples != VK_SAMPLE_COUNT_1_BIT)
887 		numCoords--;
888 
889 	string coord = "ivec" + to_string(numCoords) + "(";
890 
891 	for (int i = 0; i < numCoords; ++i)
892 	{
893 		if (i == dim)
894 			coord += c;
895 		else
896 			coord += "0";
897 		if (i < numCoords - 1)
898 			coord += ", ";
899 	}
900 	coord += ")";
901 
902 	// Append sample coordinate
903 	if (samples != VK_SAMPLE_COUNT_1_BIT)
904 	{
905 		coord += ", ";
906 		if (dim == numCoords)
907 			coord += c;
908 		else
909 			coord += "0";
910 	}
911 	return coord;
912 }
913 
914 // Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)915 string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
916 {
917 	// dim can be 3 for cube_array. Reuse the number of layers in that case.
918 	dim = std::min(dim, 2);
919 
920 	if (numCoords == 1)
921 		return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";
922 
923 	string coord = "vec" + to_string(numCoords) + "(";
924 
925 	for (int i = 0; i < numCoords; ++i)
926 	{
927 		if (i == dim)
928 			coord += c;
929 		else
930 			coord += "0.25";
931 		if (i < numNormalizedCoords)
932 			coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
933 		if (i < numCoords - 1)
934 			coord += ", ";
935 	}
936 	coord += ")";
937 	return coord;
938 }
939 
initPrograms(SourceCollections& programCollection) const940 void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
941 {
942 	VkFormat format = m_data.format;
943 
944 	Layout layout;
945 	generateLayout(layout, m_data);
946 
947 	if (layout.layoutBindings.size() > 1 &&
948 		layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
949 	{
950 		if (format == VK_FORMAT_R64_SINT)
951 			format = VK_FORMAT_R32G32_SINT;
952 
953 		if (format == VK_FORMAT_R64_UINT)
954 			format = VK_FORMAT_R32G32_UINT;
955 	}
956 
957 	std::stringstream decls, checks;
958 
959 	const string	r64			= formatIsR64(format) ? "64" : "";
960 	const string	i64Type		= formatIsR64(format) ? "64_t" : "";
961 	const string	vecType		= formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
962 	const string	qLevelType	= vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);
963 
964 	decls << "uvec4 abs(uvec4 x) { return x; }\n";
965 	if (formatIsR64(format))
966 		decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
967 	decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
968 
969 
970 	const int	componetsSize = (formatIsR64(format) ? 8 : 4);
971 	int			refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componetsSize), 4);
972 	// Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
973 	// robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
974 	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
975 		m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
976 	{
977 		refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
978 	}
979 	if (m_data.nullDescriptor)
980 		refDataNumElements = 4;
981 
982 	if (formatIsFloat(format))
983 	{
984 		decls << "float refData[" << refDataNumElements << "] = {";
985 		int i;
986 		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
987 		{
988 			if (i != 0)
989 				decls << ", ";
990 			decls << ((const float *)layout.refData.data())[i];
991 		}
992 		while (i < refDataNumElements)
993 		{
994 			if (i != 0)
995 				decls << ", ";
996 			decls << "0";
997 			i++;
998 		}
999 	}
1000 	else if (formatIsR64(format))
1001 	{
1002 		decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
1003 		int i;
1004 		for (i = 0; i < (int)layout.refData.size() / 8; ++i)
1005 		{
1006 			if (i != 0)
1007 				decls << ", ";
1008 			decls << ((const deUint64 *)layout.refData.data())[i] << "l";
1009 		}
1010 		while (i < refDataNumElements)
1011 		{
1012 			if (i != 0)
1013 				decls << ", ";
1014 			decls << "0l";
1015 			i++;
1016 		}
1017 	}
1018 	else
1019 	{
1020 		decls << "int" << " refData[" << refDataNumElements << "] = {";
1021 		int i;
1022 		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
1023 		{
1024 			if (i != 0)
1025 				decls << ", ";
1026 			decls << ((const int *)layout.refData.data())[i];
1027 		}
1028 		while (i < refDataNumElements)
1029 		{
1030 			if (i != 0)
1031 				decls << ", ";
1032 			decls << "0";
1033 			i++;
1034 		}
1035 	}
1036 
1037 	decls << "};\n";
1038 	decls << vecType << " zzzz = " << vecType << "(0);\n";
1039 	decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
1040 	decls << vecType << " expectedIB;\n";
1041 
1042 	string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
1043 	string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
1044 	string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
1045 
1046 	string imageDim = "";
1047 	int numCoords, numNormalizedCoords;
1048 	bool layered = false;
1049 	switch (m_data.viewType)
1050 	{
1051 		default: DE_ASSERT(0); // Fallthrough
1052 		case VK_IMAGE_VIEW_TYPE_1D:			imageDim = "1D";		numCoords = 1;	numNormalizedCoords = 1;	break;
1053 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	imageDim = "1DArray";	numCoords = 2;	numNormalizedCoords = 1;	layered = true;	break;
1054 		case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2D";		numCoords = 2;	numNormalizedCoords = 2;	break;
1055 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DArray";	numCoords = 3;	numNormalizedCoords = 2;	layered = true;	break;
1056 		case VK_IMAGE_VIEW_TYPE_3D:			imageDim = "3D";		numCoords = 3;	numNormalizedCoords = 3;	break;
1057 		case VK_IMAGE_VIEW_TYPE_CUBE:		imageDim = "Cube";		numCoords = 3;	numNormalizedCoords = 3;	break;
1058 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	imageDim = "CubeArray";	numCoords = 4;	numNormalizedCoords = 3;	layered = true;	break;
1059 	}
1060 	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
1061 	{
1062 		switch (m_data.viewType)
1063 		{
1064 			default: DE_ASSERT(0); // Fallthrough
1065 			case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2DMS";		break;
1066 			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DMSArray";	break;
1067 		}
1068 		numCoords++;
1069 	}
1070 	bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;
1071 
1072 	// Special case imageLoad(imageCubeArray, ...) which uses ivec3
1073 	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
1074 		m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1075 	{
1076 		numCoords = 3;
1077 	}
1078 
1079 	int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
1080 	string bufType;
1081 	if (numComponents == 1)
1082 		bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
1083 	else
1084 		bufType = imgprefix + "vec" + std::to_string(numComponents);
1085 
1086 	// For UBO's, which have a declared size in the shader, don't access outside that size.
1087 	bool declaredSize = false;
1088 	switch (m_data.descriptorType) {
1089 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1090 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1091 		declaredSize = true;
1092 		break;
1093 	default:
1094 		break;
1095 	}
1096 
1097 	checks << "  int inboundcoords, clampedLayer;\n";
1098 	checks << "  " << vecType << " expectedIB2;\n";
1099 	if (m_data.unroll)
1100 	{
1101 		if (declaredSize)
1102 			checks << "  [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
1103 		else
1104 			checks << "  [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
1105 	}
1106 	else
1107 	{
1108 		if (declaredSize)
1109 			checks << "  [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
1110 		else
1111 			checks << "  [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
1112 	}
1113 
1114 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1115 		checks << "    int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
1116 	else
1117 		checks << "    int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
1118 
1119 	decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
1120 
1121 	const char *vol = m_data.vol ? "volatile " : "";
1122 	const char *ro = m_data.readOnly ? "readonly " : "";
1123 
1124 	// Construct the declaration for the binding
1125 	switch (m_data.descriptorType)
1126 	{
1127 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1128 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1129 		decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
1130 		break;
1131 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1132 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1133 		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
1134 		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
1135 		break;
1136 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1137 		switch(format)
1138 		{
1139 		case VK_FORMAT_R64_SINT:
1140 			decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
1141 			break;
1142 		case VK_FORMAT_R64_UINT:
1143 			decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
1144 			break;
1145 		default:
1146 			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
1147 		}
1148 		break;
1149 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1150 		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
1151 		break;
1152 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1153 		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
1154 		break;
1155 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1156 		switch (format)
1157 		{
1158 		case VK_FORMAT_R64_SINT:
1159 			decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
1160 			break;
1161 		case VK_FORMAT_R64_UINT:
1162 			decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
1163 			break;
1164 		default:
1165 			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
1166 			break;
1167 		}
1168 		break;
1169 	case VERTEX_ATTRIBUTE_FETCH:
1170 		if (formatIsR64(format))
1171 		{
1172 			decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
1173 		}
1174 		else
1175 		{
1176 			decls << "layout(location = 0) in " << vecType << " attr;\n";
1177 		}
1178 		break;
1179 	default: DE_ASSERT(0);
1180 	}
1181 
1182 	string expectedOOB;
1183 	string defaultw;
1184 
1185 	switch (m_data.descriptorType)
1186 	{
1187 	default: DE_ASSERT(0); // Fallthrough
1188 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1189 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1190 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1191 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1192 		expectedOOB = "zzzz";
1193 		defaultw = "0";
1194 		break;
1195 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1196 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1197 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1198 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1199 	case VERTEX_ATTRIBUTE_FETCH:
1200 		if (numComponents == 1)
1201 		{
1202 			expectedOOB = "zzzo";
1203 		}
1204 		else if (numComponents == 2)
1205 		{
1206 			expectedOOB = "zzzo";
1207 		}
1208 		else
1209 		{
1210 			expectedOOB = "zzzz";
1211 		}
1212 		defaultw = "1";
1213 		break;
1214 	}
1215 
1216 	string idx;
1217 	switch (m_data.descriptorType)
1218 	{
1219 	default: DE_ASSERT(0); // Fallthrough
1220 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1221 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1222 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1223 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1224 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1225 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1226 	case VERTEX_ATTRIBUTE_FETCH:
1227 		idx = "idx";
1228 		break;
1229 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1230 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1231 		idx = "0";
1232 		break;
1233 	}
1234 
1235 	if (m_data.nullDescriptor)
1236 	{
1237 		checks << "    expectedIB = zzzz;\n";
1238 		checks << "    inboundcoords = 0;\n";
1239 		checks << "    int paddedinboundcoords = 0;\n";
1240 		// Vertex attribute fetch still gets format conversion applied
1241 		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
1242 			expectedOOB = "zzzz";
1243 	}
1244 	else
1245 	{
1246 		checks << "    expectedIB.x = refData[" << idx << "];\n";
1247 		if (numComponents > 1)
1248 		{
1249 			checks << "    expectedIB.y = refData[" << idx << "+1];\n";
1250 		}
1251 		else
1252 		{
1253 			checks << "    expectedIB.y = 0;\n";
1254 		}
1255 		if (numComponents > 2)
1256 		{
1257 			checks << "    expectedIB.z = refData[" << idx << "+2];\n";
1258 			checks << "    expectedIB.w = refData[" << idx << "+3];\n";
1259 		}
1260 		else
1261 		{
1262 			checks << "    expectedIB.z = 0;\n";
1263 			checks << "    expectedIB.w = " << defaultw << ";\n";
1264 		}
1265 
1266 		switch (m_data.descriptorType)
1267 		{
1268 		default: DE_ASSERT(0); // Fallthrough
1269 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1270 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1271 			// UBOs can either strictly bounds check against inboundcoords, or can
1272 			// return the contents from memory for the range padded up to paddedinboundcoords.
1273 			checks << "    int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
1274 			// fallthrough
1275 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1276 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1277 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1278 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1279 		case VERTEX_ATTRIBUTE_FETCH:
1280 			checks << "    inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
1281 			break;
1282 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1283 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1284 			// set per-component below
1285 			break;
1286 		}
1287 	}
1288 
1289 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1290 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1291 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1292 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1293 		 !m_data.readOnly)
1294 	{
1295 		for (int i = 0; i < numCoords; ++i)
1296 		{
1297 			// Treat i==3 coord (cube array layer) like i == 2
1298 			deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1299 			if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1300 				checks << "    inboundcoords = " << coordDim << ";\n";
1301 
1302 			string coord = genCoord("c", numCoords, m_data.samples, i);
1303 			string inboundcoords =
1304 				m_data.nullDescriptor ? "0" :
1305 				(m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";
1306 
1307 			checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
1308 			if (m_data.formatQualifier &&
1309 				(format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
1310 			{
1311 				checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
1312 			}
1313 		}
1314 	}
1315 
1316 	for (int i = 0; i < numCoords; ++i)
1317 	{
1318 		// Treat i==3 coord (cube array layer) like i == 2
1319 		deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1320 		if (!m_data.nullDescriptor)
1321 		{
1322 			switch (m_data.descriptorType)
1323 			{
1324 			default:
1325 				break;
1326 			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1327 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1328 				checks << "    inboundcoords = " << coordDim << ";\n";
1329 				break;
1330 			}
1331 		}
1332 
1333 		string coord = genCoord("c", numCoords, m_data.samples, i);
1334 
1335 		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1336 		{
1337 			if (formatIsR64(format))
1338 			{
1339 				checks << "    temp.x = attr;\n";
1340 				checks << "    temp.y = 0l;\n";
1341 				checks << "    temp.z = 0l;\n";
1342 				checks << "    temp.w = 0l;\n";
1343 				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
1344 			}
1345 			else
1346 			{
1347 				checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1348 				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
1349 			}
1350 			// Accumulate any incorrect values.
1351 			checks << "    accum += abs(temp);\n";
1352 		}
1353 		// Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1354 		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1355 			!(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1356 			  (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1357 		{
1358 			checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1359 
1360 			checks << "    expectedIB2 = expectedIB;\n";
1361 
1362 			// Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1363 			if (dataDependsOnLayer && i == numNormalizedCoords)
1364 				checks << "    if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1365 
1366 			if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1367 			{
1368 				if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1369 				{
1370 					checks << "    if (temp == zzzz) temp = " << vecType << "(0);\n";
1371 					if (m_data.formatQualifier && numComponents < 4)
1372 						checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1373 					checks << "    else temp = " << vecType << "(1);\n";
1374 				}
1375 				else
1376 					// multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1377 					checks << "    if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1378 			}
1379 			else
1380 			{
1381 				// Storage buffers may be split into per-component loads. Generate a second
1382 				// expected out of bounds value where some subset of the components are
1383 				// actually in-bounds. If both loads and stores are split into per-component
1384 				// accesses, then the result value can be a mix of storeValue and zero.
1385 				string expectedOOB2 = expectedOOB;
1386 				string expectedOOB3 = expectedOOB;
1387 				if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1388 					 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1389 					 !m_data.nullDescriptor)
1390 				{
1391 					int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1392 					int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
1393 					string sstoreValue = de::toString(storeValue);
1394 					switch (mod)
1395 					{
1396 					case 0:
1397 						break;
1398 					case 1:
1399 						expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1400 						expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1401 						break;
1402 					case 2:
1403 						expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1404 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1405 						break;
1406 					case 3:
1407 						expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1408 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1409 						break;
1410 					}
1411 				}
1412 
1413 				// Entirely in-bounds.
1414 				checks << "    if (c >= 0 && c < inboundcoords) {\n"
1415 						  "       if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
1416 						  "    }\n";
1417 
1418 				// normal out-of-bounds value
1419 				if (m_data.testRobustness2)
1420 					checks << "    else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1421 				else
1422 					// image_robustness relaxes alpha which is allowed to be zero or one
1423 					checks << "    else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1424 
1425 				if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1426 					m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1427 				{
1428 					checks << "    else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
1429 				}
1430 
1431 				// null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1432 				if (m_data.nullDescriptor && m_data.formatQualifier &&
1433 					(m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1434 					numComponents < 4)
1435 					checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1436 
1437 				// non-volatile value replaced with stored value
1438 				if (supportsStores(m_data.descriptorType) && !m_data.vol) {
1439 					checks << "    else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";
1440 
1441 					if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
1442 
1443 						for (int mask = (numComponents*numComponents) - 2; mask > 0; mask--) {
1444 							checks << "    else if (temp == " << vecType << "(";
1445 							for (int vecIdx = 0; vecIdx < 4; vecIdx++) {
1446 								if (mask & (1 << vecIdx)) checks << storeValue;
1447 								else checks << "0";
1448 
1449 								if (vecIdx != 3) checks << ",";
1450 							}
1451 							checks << ")) temp = " << vecType << "(0);\n";
1452 						}
1453 					}
1454 				}
1455 
1456 				// value straddling the boundary, returning a partial vector
1457 				if (expectedOOB2 != expectedOOB)
1458 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
1459 				if (expectedOOB3 != expectedOOB)
1460 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";
1461 
1462 				// failure
1463 				checks << "    else temp = " << vecType << "(1);\n";
1464 			}
1465 			// Accumulate any incorrect values.
1466 			checks << "    accum += abs(temp);\n";
1467 
1468 			// Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1469 			if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1470 			{
1471 				// Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1472 				string coord0 = genCoord("0", numCoords, m_data.samples, i);
1473 				checks << "    if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
1474 				checks << "    if (c != 0) temp -= " << expectedOOB << ";\n";
1475 				checks << "    accum += abs(temp);\n";
1476 			}
1477 		}
1478 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1479 			m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1480 		{
1481 			string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1482 
1483 			checks << "    expectedIB2 = expectedIB;\n";
1484 
1485 			// Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1486 			if (dataDependsOnLayer && i == numNormalizedCoords)
1487 			{
1488 				checks << "    clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
1489 				checks << "    expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1490 			}
1491 
1492 			stringstream normexpected;
1493 			// Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
1494 			if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
1495 				m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1496 				(layered && i == numCoords-1))
1497 				normexpected << "    temp -= expectedIB2;\n";
1498 			else
1499 			{
1500 				normexpected << "    if (c >= 0 && c < inboundcoords)\n";
1501 				normexpected << "        temp -= expectedIB2;\n";
1502 				normexpected << "    else\n";
1503 				if (m_data.testRobustness2)
1504 					normexpected << "        temp -= " << expectedOOB << ";\n";
1505 				else
1506 					// image_robustness relaxes alpha which is allowed to be zero or one
1507 					normexpected << "        temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1508 			}
1509 
1510 			checks << "    temp = texture(texture0_1, " << coordNorm << ");\n";
1511 			checks << normexpected.str();
1512 			checks << "    accum += abs(temp);\n";
1513 			checks << "    temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1514 			checks << normexpected.str();
1515 			checks << "    accum += abs(temp);\n";
1516 			checks << "    temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1517 			checks << normexpected.str();
1518 			checks << "    accum += abs(temp);\n";
1519 		}
1520 		if (m_data.nullDescriptor)
1521 		{
1522 			const char *sizeswiz;
1523 			switch (m_data.viewType)
1524 			{
1525 				default: DE_ASSERT(0); // Fallthrough
1526 				case VK_IMAGE_VIEW_TYPE_1D:			sizeswiz = ".xxxx";	break;
1527 				case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	sizeswiz = ".xyxx";	break;
1528 				case VK_IMAGE_VIEW_TYPE_2D:			sizeswiz = ".xyxx";	break;
1529 				case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	sizeswiz = ".xyzx";	break;
1530 				case VK_IMAGE_VIEW_TYPE_3D:			sizeswiz = ".xyzx";	break;
1531 				case VK_IMAGE_VIEW_TYPE_CUBE:		sizeswiz = ".xyxx";	break;
1532 				case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	sizeswiz = ".xyzx";	break;
1533 			}
1534 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1535 			{
1536 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1537 				{
1538 					checks << "    temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
1539 					checks << "    accum += abs(temp);\n";
1540 
1541 					// checking textureSize with clearly out of range LOD values
1542 					checks << "    temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
1543 					checks << "    accum += abs(temp);\n";
1544 					checks << "    temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
1545 					checks << "    accum += abs(temp);\n";
1546 				}
1547 				else
1548 				{
1549 					checks << "    temp = textureSize(texture0_1)" << sizeswiz <<";\n";
1550 					checks << "    accum += abs(temp);\n";
1551 					checks << "    temp = textureSamples(texture0_1).xxxx;\n";
1552 					checks << "    accum += abs(temp);\n";
1553 				}
1554 			}
1555 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1556 			{
1557 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1558 				{
1559 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1560 					checks << "    accum += abs(temp);\n";
1561 				}
1562 				else
1563 				{
1564 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1565 					checks << "    accum += abs(temp);\n";
1566 					checks << "    temp = imageSamples(image0_1).xxxx;\n";
1567 					checks << "    accum += abs(temp);\n";
1568 				}
1569 			}
1570 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1571 				m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1572 			{
1573 				// expect zero for runtime-sized array .length()
1574 				checks << "    temp = " << vecType << "(ssbo0_1.val.length());\n";
1575 				checks << "    accum += abs(temp);\n";
1576 				checks << "    temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1577 				checks << "    accum += abs(temp);\n";
1578 			}
1579 		}
1580 	}
1581 	checks << "  }\n";
1582 
1583 	// outside the coordinates loop because we only need to call it once
1584 	if (m_data.nullDescriptor &&
1585 		m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1586 		m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1587 	{
1588 		checks << "  temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1589 		checks << "  temp = " << vecType << "(temp_ql);\n";
1590 		checks << "  accum += abs(temp);\n";
1591 
1592 		if (m_data.stage == STAGE_FRAGMENT)
1593 		{
1594 			// as here we only want to check that textureQueryLod returns 0 when
1595 			// texture0_1 is null, we don't need to use the actual texture coordinates
1596 			// (and modify the vertex shader below to do so). Any coordinates are fine.
1597 			// gl_FragCoord has been selected "randomly", instead of selecting 0 for example.
1598 			std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1599 			checks << "  vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1600 			checks << "  temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1601 			checks << "  temp = " << vecType << "(temp_ql);\n";
1602 			checks << "  accum += abs(temp);\n";
1603 		}
1604 	}
1605 
1606 
1607 	const bool		needsScalarLayout	= m_data.needsScalarBlockLayout();
1608 	const uint32_t	shaderBuildOptions	= (needsScalarLayout
1609 										? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS)
1610 										: 0u);
1611 
1612 	const bool is64BitFormat = formatIsR64(m_data.format);
1613 	std::string support =	"#version 460 core\n"
1614 							"#extension GL_EXT_nonuniform_qualifier : enable\n" +
1615 							(needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1616 							"#extension GL_EXT_samplerless_texture_functions : enable\n"
1617 							"#extension GL_EXT_control_flow_attributes : enable\n"
1618 							"#extension GL_EXT_shader_image_load_formatted : enable\n";
1619 	std::string SupportR64 =	"#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1620 								"#extension GL_EXT_shader_image_int64 : require\n";
1621 	if (is64BitFormat)
1622 		support += SupportR64;
1623 	if (m_data.stage == STAGE_RAYGEN)
1624 		support += "#extension GL_EXT_ray_tracing : require\n";
1625 
1626 	std::string code =	"  " + vecType + " accum = " + vecType + "(0);\n"
1627 						"  " + vecType + " temp;\n"
1628 						"  " + qLevelType + " temp_ql;\n" +
1629 						checks.str() +
1630 						"  " + vecType + " color = (accum != " + vecType + "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
1631 
1632 	switch (m_data.stage)
1633 	{
1634 	default: DE_ASSERT(0); // Fallthrough
1635 	case STAGE_COMPUTE:
1636 		{
1637 			std::stringstream css;
1638 			css << support
1639 				<< decls.str() <<
1640 				"layout(local_size_x = 1, local_size_y = 1) in;\n"
1641 				"void main()\n"
1642 				"{\n"
1643 				<< code <<
1644 				"  imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1645 				"}\n";
1646 
1647 			programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1648 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1649 			break;
1650 		}
1651 	case STAGE_RAYGEN:
1652 		{
1653 			std::stringstream css;
1654 			css << support
1655 				<< decls.str() <<
1656 				"void main()\n"
1657 				"{\n"
1658 				<< code <<
1659 				"  imageStore(image0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1660 				"}\n";
1661 
1662 			programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1663 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, shaderBuildOptions, true);
1664 			break;
1665 		}
1666 	case STAGE_VERTEX:
1667 		{
1668 			std::stringstream vss;
1669 			vss << support
1670 				<< decls.str() <<
1671 				"void main()\n"
1672 				"{\n"
1673 				<< code <<
1674 				"  imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1675 				"  gl_PointSize = 1.0f;\n"
1676 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1677 				"}\n";
1678 
1679 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1680 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1681 			break;
1682 		}
1683 	case STAGE_FRAGMENT:
1684 		{
1685 			std::stringstream vss;
1686 			vss <<
1687 				"#version 450 core\n"
1688 				"void main()\n"
1689 				"{\n"
1690 				// full-viewport quad
1691 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1692 				"}\n";
1693 
1694 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1695 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1696 
1697 			std::stringstream fss;
1698 			fss << support
1699 				<< decls.str() <<
1700 				"void main()\n"
1701 				"{\n"
1702 				<< code <<
1703 				"  imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1704 				"}\n";
1705 
1706 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1707 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1708 			break;
1709 		}
1710 	}
1711 
1712 	// The 64-bit conditions below are redundant. Can we support the below shader for other than 64-bit formats?
1713 	if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1714 	{
1715 		const std::string	ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1716 		std::stringstream	fillShader;
1717 
1718 		fillShader <<
1719 			"#version 450\n"
1720 			<< SupportR64
1721 			<< "\n"
1722 			"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1723 			"layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1724 			<< string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1725 			"\n"
1726 			"layout(std430, binding = 1) buffer inputBuffer\n"
1727 			"{\n"
1728 			"  int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1729 			"} inBuffer;\n"
1730 			"\n"
1731 			"void main(void)\n"
1732 			"{\n"
1733 			"  int gx = int(gl_GlobalInvocationID.x);\n"
1734 			"  int gy = int(gl_GlobalInvocationID.y);\n"
1735 			"  int gz = int(gl_GlobalInvocationID.z);\n"
1736 			"  uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1737 
1738 			for(int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1739 			{
1740 				fillShader << "  imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1741 			}
1742 
1743 			fillShader << "}\n";
1744 
1745 		programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1746 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1747 	}
1748 
1749 }
1750 
imageViewTypeToImageType(VkImageViewType type)1751 VkImageType imageViewTypeToImageType (VkImageViewType type)
1752 {
1753 	switch (type)
1754 	{
1755 		case VK_IMAGE_VIEW_TYPE_1D:
1756 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:		return VK_IMAGE_TYPE_1D;
1757 		case VK_IMAGE_VIEW_TYPE_2D:
1758 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1759 		case VK_IMAGE_VIEW_TYPE_CUBE:
1760 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:		return VK_IMAGE_TYPE_2D;
1761 		case VK_IMAGE_VIEW_TYPE_3D:				return VK_IMAGE_TYPE_3D;
1762 		default:
1763 			DE_ASSERT(false);
1764 	}
1765 
1766 	return VK_IMAGE_TYPE_2D;
1767 }
1768 
createInstance(Context& context) const1769 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1770 {
1771 	return new RobustnessExtsTestInstance(context, m_data);
1772 }
1773 
iterate(void)1774 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1775 {
1776 	const VkInstance			instance			= m_context.getInstance();
1777 	const InstanceInterface&	vki					= m_context.getInstanceInterface();
1778 	const VkDevice				device				= getLogicalDevice(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1779 	const vk::DeviceInterface&	vk					= getDeviceInterface(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1780 	const VkPhysicalDevice		physicalDevice		= chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1781 	SimpleAllocator				allocator			(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1782 
1783 	Layout layout;
1784 	generateLayout(layout, m_data);
1785 
1786 	// Get needed properties.
1787 	VkPhysicalDeviceProperties2 properties = initVulkanStructure();
1788 
1789 #ifndef CTS_USES_VULKANSC
1790 	VkPhysicalDeviceRayTracingPipelinePropertiesKHR rayTracingProperties = initVulkanStructure();
1791 #endif
1792 
1793 	VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties = initVulkanStructure();
1794 
1795 #ifndef CTS_USES_VULKANSC
1796 	if (m_context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
1797 	{
1798 		rayTracingProperties.pNext = properties.pNext;
1799 		properties.pNext = &rayTracingProperties;
1800 	}
1801 #endif
1802 
1803 	if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1804 	{
1805 		robustness2Properties.pNext = properties.pNext;
1806 		properties.pNext = &robustness2Properties;
1807 	}
1808 
1809 	vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1810 
1811 	if (m_data.testRobustness2)
1812 	{
1813 		if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1814 			robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1815 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1816 
1817 		if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1818 			robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1819 			!deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1820 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1821 	}
1822 
1823 	VkPipelineBindPoint bindPoint;
1824 
1825 	switch (m_data.stage)
1826 	{
1827 	case STAGE_COMPUTE:
1828 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1829 		break;
1830 #ifndef CTS_USES_VULKANSC
1831 	case STAGE_RAYGEN:
1832 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR;
1833 		break;
1834 #endif
1835 	default:
1836 		bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1837 		break;
1838 	}
1839 
1840 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
1841 	Move<vk::VkDescriptorPool>		descriptorPool;
1842 	Move<vk::VkDescriptorSet>		descriptorSet;
1843 
1844 	int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1845 	int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1846 
1847 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1848 
1849 	VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1850 
1851 #ifndef CTS_USES_VULKANSC
1852 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1853 #else
1854 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1855 #endif
1856 
1857 	// Create a layout and allocate a descriptor set for it.
1858 
1859 	const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1860 	{
1861 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1862 		DE_NULL,
1863 
1864 		layoutCreateFlags,
1865 		(deUint32)bindings.size(),
1866 		bindings.empty() ? DE_NULL : bindings.data()
1867 	};
1868 
1869 	descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1870 
1871 	vk::DescriptorPoolBuilder poolBuilder;
1872 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1873 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1874 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1875 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1876 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1877 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1878 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1879 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1880 
1881 	descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1882 
1883 	const void *pNext = DE_NULL;
1884 
1885 	if (!m_data.pushDescriptor)
1886 		descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1887 
1888 	BufferWithMemoryPtr buffer;
1889 
1890 	deUint8 *bufferPtr = DE_NULL;
1891 	if (!m_data.nullDescriptor)
1892 	{
1893 		// Create a buffer to hold data for all descriptors.
1894 		VkDeviceSize	size = de::max(
1895 			(VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1896 			(VkDeviceSize)256);
1897 
1898 		VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1899 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1900 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1901 		{
1902 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1903 			usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
1904 		}
1905 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1906 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1907 		{
1908 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1909 			usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1910 		}
1911 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1912 		{
1913 			usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1914 		}
1915 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
1916 		{
1917 			usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
1918 		}
1919 		else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1920 		{
1921 			size = m_data.bufferLen;
1922 		}
1923 
1924 		buffer = BufferWithMemoryPtr(new BufferWithMemory(
1925 			vk, device, allocator, makeBufferCreateInfo(size, usage), MemoryRequirement::HostVisible));
1926 		bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1927 
1928 		deMemset(bufferPtr, 0x3f, (size_t)size);
1929 
1930 		deMemset(bufferPtr, 0, m_data.bufferLen);
1931 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1932 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1933 		{
1934 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1935 		}
1936 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1937 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1938 		{
1939 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1940 		}
1941 	}
1942 
1943 	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1944 
1945 	Move<VkDescriptorSetLayout>		descriptorSetLayoutR64;
1946 	Move<VkDescriptorPool>			descriptorPoolR64;
1947 	Move<VkDescriptorSet>			descriptorSetFillImage;
1948 	Move<VkShaderModule>			shaderModuleFillImage;
1949 	Move<VkPipelineLayout>			pipelineLayoutFillImage;
1950 	Move<VkPipeline>				pipelineFillImage;
1951 
1952 	Move<VkCommandPool>				cmdPool		= createCommandPool(vk, device, 0, queueFamilyIndex);
1953 	Move<VkCommandBuffer>			cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1954 	VkQueue							queue;
1955 
1956 	vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1957 
1958 	const VkImageSubresourceRange	barrierRange				=
1959 	{
1960 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1961 		0u,							// deUint32				baseMipLevel;
1962 		VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
1963 		0u,							// deUint32				baseArrayLayer;
1964 		VK_REMAINING_ARRAY_LAYERS	// deUint32				layerCount;
1965 	};
1966 
1967 	VkImageMemoryBarrier			preImageBarrier				=
1968 	{
1969 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
1970 		DE_NULL,											// const void*			pNext
1971 		0u,													// VkAccessFlags		srcAccessMask
1972 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
1973 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
1974 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
1975 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
1976 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
1977 		DE_NULL,											// VkImage				image
1978 		barrierRange,										// VkImageSubresourceRange	subresourceRange;
1979 	};
1980 
1981 	VkImageMemoryBarrier			postImageBarrier			=
1982 	{
1983 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1984 		DE_NULL,									// const void*				pNext;
1985 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
1986 		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
1987 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
1988 		VK_IMAGE_LAYOUT_GENERAL,					// VkImageLayout			newLayout;
1989 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
1990 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
1991 		DE_NULL,									// VkImage					image;
1992 		barrierRange,								// VkImageSubresourceRange	subresourceRange;
1993 	};
1994 
1995 	vk::VkClearColorValue			clearValue;
1996 	clearValue.uint32[0] = 0u;
1997 	clearValue.uint32[1] = 0u;
1998 	clearValue.uint32[2] = 0u;
1999 	clearValue.uint32[3] = 0u;
2000 
2001 	beginCommandBuffer(vk, *cmdBuffer, 0u);
2002 
2003 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
2004 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
2005 	typedef de::SharedPtr<ImageWithMemory>		ImageWithMemorySp;
2006 	typedef de::SharedPtr<Unique<VkImageView> >	VkImageViewSp;
2007 
2008 	vector<BufferViewHandleSp>					bufferViews(1);
2009 
2010 	VkImageCreateFlags mutableFormatFlag = 0;
2011 	// The 64-bit image tests use a view format which differs from the image.
2012 	if (formatIsR64(m_data.format))
2013 		mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2014 	VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2015 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2016 		imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2017 
2018 	const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(vki,
2019 										physicalDevice,
2020 										m_data.format).optimalTilingFeatures &
2021 										VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2022 
2023 	const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2024 
2025 	const VkImageCreateInfo			outputImageCreateInfo			=
2026 	{
2027 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2028 		DE_NULL,								// const void*				pNext;
2029 		mutableFormatFlag,						// VkImageCreateFlags		flags;
2030 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
2031 		m_data.format,							// VkFormat					format;
2032 		{
2033 			DIM,								// deUint32	width;
2034 			DIM,								// deUint32	height;
2035 			1u									// deUint32	depth;
2036 		},										// VkExtent3D				extent;
2037 		1u,										// deUint32					mipLevels;
2038 		1u,										// deUint32					arrayLayers;
2039 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
2040 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2041 		VK_IMAGE_USAGE_STORAGE_BIT
2042 		| usageSampledImage
2043 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2044 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2045 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2046 		0u,										// deUint32					queueFamilyIndexCount;
2047 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2048 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2049 	};
2050 
2051 	deUint32 width = m_data.imageDim[0];
2052 	deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
2053 	deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2054 	deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2055 						m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
2056 						m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2057 						m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2058 
2059 	const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
2060 
2061 	const VkImageCreateInfo			imageCreateInfo			=
2062 	{
2063 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2064 		DE_NULL,								// const void*				pNext;
2065 		imageCreateFlags,						// VkImageCreateFlags		flags;
2066 		imageViewTypeToImageType(m_data.viewType),	// VkImageType				imageType;
2067 		m_data.format,							// VkFormat					format;
2068 		{
2069 			width,								// deUint32	width;
2070 			height,								// deUint32	height;
2071 			depth								// deUint32	depth;
2072 		},										// VkExtent3D				extent;
2073 		1u,										// deUint32					mipLevels;
2074 		layers,									// deUint32					arrayLayers;
2075 		m_data.samples,							// VkSampleCountFlagBits	samples;
2076 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2077 		usageImage
2078 		| usageSampledImage
2079 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2080 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2081 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2082 		0u,										// deUint32					queueFamilyIndexCount;
2083 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2084 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2085 	};
2086 
2087 	VkImageViewCreateInfo		imageViewCreateInfo		=
2088 	{
2089 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
2090 		DE_NULL,									// const void*				pNext;
2091 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
2092 		DE_NULL,									// VkImage					image;
2093 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
2094 		m_data.format,								// VkFormat					format;
2095 		{
2096 			VK_COMPONENT_SWIZZLE_IDENTITY,
2097 			VK_COMPONENT_SWIZZLE_IDENTITY,
2098 			VK_COMPONENT_SWIZZLE_IDENTITY,
2099 			VK_COMPONENT_SWIZZLE_IDENTITY
2100 		},											// VkComponentMapping		 components;
2101 		{
2102 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
2103 			0u,										// deUint32				baseMipLevel;
2104 			VK_REMAINING_MIP_LEVELS,				// deUint32				levelCount;
2105 			0u,										// deUint32				baseArrayLayer;
2106 			VK_REMAINING_ARRAY_LAYERS				// deUint32				layerCount;
2107 		}											// VkImageSubresourceRange	subresourceRange;
2108 	};
2109 
2110 	vector<ImageWithMemorySp> images(2);
2111 	vector<VkImageViewSp> imageViews(2);
2112 
2113 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2114 	{
2115 		deUint32 *ptr = (deUint32 *)bufferPtr;
2116 		deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2117 	}
2118 
2119 	BufferWithMemoryPtr				bufferImageR64;
2120 	BufferWithMemoryPtr				bufferOutputImageR64;
2121 	const VkDeviceSize				sizeOutputR64	= 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
2122 	const VkDeviceSize				sizeOneLayers	= 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2123 	const VkDeviceSize				sizeImageR64	= sizeOneLayers * layers;
2124 
2125 	if (formatIsR64(m_data.format))
2126 	{
2127 		bufferOutputImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2128 			vk, device, allocator,
2129 			makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2130 			MemoryRequirement::HostVisible));
2131 
2132 		deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
2133 
2134 		for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2135 		{
2136 			bufferUint64Ptr[ndx] = 0;
2137 		}
2138 		flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2139 
2140 		bufferImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2141 			vk, device, allocator,
2142 			makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2143 			MemoryRequirement::HostVisible));
2144 
2145 		for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
2146 		{
2147 			bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
2148 			bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2149 
2150 			for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2151 			{
2152 				bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
2153 			}
2154 		}
2155 		flushAlloc(vk, device, bufferImageR64->getAllocation());
2156 	}
2157 
2158 	for (size_t b = 0; b < bindings.size(); ++b)
2159 	{
2160 		VkDescriptorSetLayoutBinding &binding = bindings[b];
2161 
2162 		if (binding.descriptorCount == 0)
2163 			continue;
2164 		if (b == 1 && m_data.nullDescriptor)
2165 			continue;
2166 
2167 		DE_ASSERT(binding.descriptorCount == 1);
2168 		switch (binding.descriptorType)
2169 		{
2170 		default: DE_ASSERT(0); // Fallthrough
2171 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2172 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2173 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2174 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2175 			{
2176 				deUint32 *ptr = (deUint32 *)bufferPtr;
2177 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2178 			}
2179 			break;
2180 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2181 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2182 			{
2183 				deUint32 *ptr = (deUint32 *)bufferPtr;
2184 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2185 
2186 				const vk::VkBufferViewCreateInfo viewCreateInfo =
2187 				{
2188 					vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2189 					DE_NULL,
2190 					(vk::VkBufferViewCreateFlags)0,
2191 					**buffer,								// buffer
2192 					m_data.format,							// format
2193 					(vk::VkDeviceSize)0,					// offset
2194 					(vk::VkDeviceSize)m_data.bufferLen		// range
2195 				};
2196 				vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2197 				bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2198 			}
2199 			break;
2200 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2201 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2202 			{
2203 				if (bindings.size() > 1 &&
2204 					bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2205 				{
2206 					if (m_data.format == VK_FORMAT_R64_SINT)
2207 						imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2208 
2209 					if (m_data.format == VK_FORMAT_R64_UINT)
2210 						imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2211 				}
2212 
2213 				if (b == 0)
2214 				{
2215 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2216 					imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2217 				}
2218 				else
2219 				{
2220 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2221 					imageViewCreateInfo.viewType = m_data.viewType;
2222 				}
2223 				imageViewCreateInfo.image = **images[b];
2224 				imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2225 
2226 				VkImage						img			= **images[b];
2227 				const VkBuffer&				bufferR64= ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2228 				const VkImageCreateInfo&	imageInfo	= ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2229 				const deUint32				clearLayers	= b == 0 ? 1 : layers;
2230 
2231 				if (!formatIsR64(m_data.format))
2232 				{
2233 					preImageBarrier.image	= img;
2234 					if (b == 1)
2235 					{
2236 						if (formatIsFloat(m_data.format))
2237 						{
2238 							deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2239 						}
2240 						else if (formatIsSignedInt(m_data.format))
2241 						{
2242 							deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2243 						}
2244 						else
2245 						{
2246 							deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2247 						}
2248 					}
2249 					postImageBarrier.image	= img;
2250 
2251 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2252 
2253 					for (unsigned int i = 0; i < clearLayers; ++i)
2254 					{
2255 						const VkImageSubresourceRange	clearRange				=
2256 						{
2257 							VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2258 							0u,							// deUint32				baseMipLevel;
2259 							VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2260 							i,							// deUint32				baseArrayLayer;
2261 							1							// deUint32				layerCount;
2262 						};
2263 
2264 						vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2265 
2266 						// Use same data for all faces for cube(array), otherwise make value a function of the layer
2267 						if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2268 						{
2269 							if (formatIsFloat(m_data.format))
2270 								clearValue.float32[0] += 1;
2271 							else if (formatIsSignedInt(m_data.format))
2272 								clearValue.int32[0] += 1;
2273 							else
2274 								clearValue.uint32[0] += 1;
2275 						}
2276 					}
2277 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2278 				}
2279 				else
2280 				{
2281 					if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2282 					{
2283 						const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2284 						const VkImageMemoryBarrier		imageBarrierPre		= makeImageMemoryBarrier(0,
2285 																				VK_ACCESS_SHADER_WRITE_BIT,
2286 																				VK_IMAGE_LAYOUT_UNDEFINED,
2287 																				VK_IMAGE_LAYOUT_GENERAL,
2288 																				img,
2289 																				subresourceRange);
2290 						const VkImageMemoryBarrier		imageBarrierPost	= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2291 																				VK_ACCESS_SHADER_READ_BIT,
2292 																				VK_IMAGE_LAYOUT_GENERAL,
2293 																				VK_IMAGE_LAYOUT_GENERAL,
2294 																				img,
2295 																				subresourceRange);
2296 
2297 						descriptorSetLayoutR64 =
2298 							DescriptorSetLayoutBuilder()
2299 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2300 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2301 							.build(vk, device);
2302 
2303 						descriptorPoolR64 =
2304 							DescriptorPoolBuilder()
2305 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2306 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,1)
2307 							.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2308 
2309 						descriptorSetFillImage = makeDescriptorSet(vk,
2310 							device,
2311 							*descriptorPoolR64,
2312 							*descriptorSetLayoutR64);
2313 
2314 						shaderModuleFillImage	= createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2315 						pipelineLayoutFillImage	= makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2316 						pipelineFillImage		= makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2317 
2318 						const VkDescriptorImageInfo		descResultImageInfo		= makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2319 						const VkDescriptorBufferInfo	descResultBufferInfo	= makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2320 
2321 						DescriptorSetUpdateBuilder()
2322 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2323 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2324 							.update(vk, device);
2325 
2326 						vk.cmdPipelineBarrier(*cmdBuffer,
2327 							VK_PIPELINE_STAGE_HOST_BIT,
2328 							VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2329 							(VkDependencyFlags)0,
2330 							0, (const VkMemoryBarrier*)DE_NULL,
2331 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2332 							1, &imageBarrierPre);
2333 
2334 						vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2335 						vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2336 
2337 						vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2338 
2339 						vk.cmdPipelineBarrier(*cmdBuffer,
2340 									VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2341 									VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2342 									(VkDependencyFlags)0,
2343 									0, (const VkMemoryBarrier*)DE_NULL,
2344 									0, (const VkBufferMemoryBarrier*)DE_NULL,
2345 									1, &imageBarrierPost);
2346 					}
2347 					else
2348 					{
2349 						VkDeviceSize					size			= ((b == 0) ? sizeOutputR64 : sizeImageR64);
2350 						const vector<VkBufferImageCopy>	bufferImageCopy	(1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2351 
2352 						copyBufferToImage(vk,
2353 							*cmdBuffer,
2354 							bufferR64,
2355 							size,
2356 							bufferImageCopy,
2357 							VK_IMAGE_ASPECT_COLOR_BIT,
2358 							1,
2359 							clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2360 					}
2361 				}
2362 			}
2363 			break;
2364 		}
2365 	}
2366 
2367 	const VkSamplerCreateInfo	samplerParams	=
2368 	{
2369 		VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType			sType;
2370 		DE_NULL,									// const void*				pNext;
2371 		0,											// VkSamplerCreateFlags		flags;
2372 		VK_FILTER_NEAREST,							// VkFilter					magFilter:
2373 		VK_FILTER_NEAREST,							// VkFilter					minFilter;
2374 		VK_SAMPLER_MIPMAP_MODE_NEAREST,				// VkSamplerMipmapMode		mipmapMode;
2375 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeU;
2376 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeV;
2377 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeW;
2378 		0.0f,										// float					mipLodBias;
2379 		VK_FALSE,									// VkBool32					anistoropyEnable;
2380 		1.0f,										// float					maxAnisotropy;
2381 		VK_FALSE,									// VkBool32					compareEnable;
2382 		VK_COMPARE_OP_ALWAYS,						// VkCompareOp				compareOp;
2383 		0.0f,										// float					minLod;
2384 		0.0f,										// float					maxLod;
2385 		formatIsFloat(m_data.format) ?
2386 			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2387 			VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,	// VkBorderColor			borderColor;
2388 		VK_FALSE									// VkBool32					unnormalizedCoordinates;
2389 	};
2390 
2391 	Move<VkSampler>				sampler			(createSampler(vk, device, &samplerParams));
2392 
2393 	// Flush modified memory.
2394 	if (!m_data.nullDescriptor)
2395 		flushAlloc(vk, device, buffer->getAllocation());
2396 
2397 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2398 	{
2399 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// sType
2400 		DE_NULL,													// pNext
2401 		(VkPipelineLayoutCreateFlags)0,
2402 		1u,															// setLayoutCount
2403 		&descriptorSetLayout.get(),									// pSetLayouts
2404 		0u,															// pushConstantRangeCount
2405 		DE_NULL,													// pPushConstantRanges
2406 	};
2407 
2408 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2409 
2410 	BufferWithMemoryPtr copyBuffer;
2411 	copyBuffer = BufferWithMemoryPtr(new BufferWithMemory(
2412 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2413 
2414 	{
2415 		vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2416 		vector<VkDescriptorImageInfo> imageInfoVec(2);
2417 		vector<VkBufferView> bufferViewVec(2);
2418 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2419 		int vecIndex = 0;
2420 		int numDynamic = 0;
2421 
2422 #ifndef CTS_USES_VULKANSC
2423 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2424 												bufTemplateEntriesBefore,
2425 												texelBufTemplateEntriesBefore;
2426 #endif
2427 
2428 		for (size_t b = 0; b < bindings.size(); ++b)
2429 		{
2430 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2431 			// Construct the declaration for the binding
2432 			if (binding.descriptorCount > 0)
2433 			{
2434 				// output image
2435 				switch (binding.descriptorType)
2436 				{
2437 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2438 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2439 					// Output image.
2440 					if (b == 1 && m_data.nullDescriptor)
2441 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2442 					else
2443 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2444 					break;
2445 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2446 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2447 					if (b == 1 && m_data.nullDescriptor)
2448 						bufferViewVec[vecIndex] = DE_NULL;
2449 					else
2450 						bufferViewVec[vecIndex] = **bufferViews[0];
2451 					break;
2452 				default:
2453 					// Other descriptor types.
2454 					if (b == 1 && m_data.nullDescriptor)
2455 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2456 					else
2457 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2458 					break;
2459 				}
2460 
2461 				VkWriteDescriptorSet w =
2462 				{
2463 					VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,				// sType
2464 					DE_NULL,											// pNext
2465 					m_data.pushDescriptor ? DE_NULL : *descriptorSet,	// dstSet
2466 					(deUint32)b,										// binding
2467 					0,													// dstArrayElement
2468 					1u,													// descriptorCount
2469 					binding.descriptorType,								// descriptorType
2470 					&imageInfoVec[vecIndex],							// pImageInfo
2471 					&bufferInfoVec[vecIndex],							// pBufferInfo
2472 					&bufferViewVec[vecIndex],							// pTexelBufferView
2473 				};
2474 
2475 #ifndef CTS_USES_VULKANSC
2476 				VkDescriptorUpdateTemplateEntry templateEntry =
2477 				{
2478 					(deUint32)b,				// uint32_t				dstBinding;
2479 					0,							// uint32_t				dstArrayElement;
2480 					1u,							// uint32_t				descriptorCount;
2481 					binding.descriptorType,		// VkDescriptorType		descriptorType;
2482 					0,							// size_t				offset;
2483 					0,							// size_t				stride;
2484 				};
2485 
2486 				switch (binding.descriptorType)
2487 				{
2488 				default: DE_ASSERT(0); // Fallthrough
2489 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2490 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2491 					templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2492 					imgTemplateEntriesBefore.push_back(templateEntry);
2493 					break;
2494 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2495 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2496 					templateEntry.offset = vecIndex * sizeof(VkBufferView);
2497 					texelBufTemplateEntriesBefore.push_back(templateEntry);
2498 					break;
2499 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2500 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2501 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2502 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2503 					templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2504 					bufTemplateEntriesBefore.push_back(templateEntry);
2505 					break;
2506 				}
2507 #endif
2508 
2509 				vecIndex++;
2510 
2511 				writesBeforeBindVec.push_back(w);
2512 
2513 				// Count the number of dynamic descriptors in this set.
2514 				if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2515 					binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2516 				{
2517 					numDynamic++;
2518 				}
2519 			}
2520 		}
2521 
2522 		// Make zeros have at least one element so &zeros[0] works
2523 		vector<deUint32> zeros(de::max(1,numDynamic));
2524 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2525 
2526 		// Randomly select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate
2527 		if (m_data.useTemplate)
2528 		{
2529 #ifndef CTS_USES_VULKANSC
2530 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2531 			{
2532 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2533 				NULL,														// void*									pNext;
2534 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2535 				0,															// uint32_t									descriptorUpdateEntryCount;
2536 				DE_NULL,													// uint32_t									descriptorUpdateEntryCount;
2537 				m_data.pushDescriptor ?
2538 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2539 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,		// VkDescriptorUpdateTemplateType			templateType;
2540 				descriptorSetLayout.get(),									// VkDescriptorSetLayout					descriptorSetLayout;
2541 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2542 				*pipelineLayout,											// VkPipelineLayout							pipelineLayout;
2543 				0,															// uint32_t									set;
2544 			};
2545 
2546 			void *templateVectorData[] =
2547 			{
2548 				imageInfoVec.data(),
2549 				bufferInfoVec.data(),
2550 				bufferViewVec.data(),
2551 			};
2552 
2553 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2554 			{
2555 				&imgTemplateEntriesBefore,
2556 				&bufTemplateEntriesBefore,
2557 				&texelBufTemplateEntriesBefore,
2558 			};
2559 
2560 			if (m_data.pushDescriptor)
2561 			{
2562 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2563 				{
2564 					if (templateVectorsBefore[i]->size())
2565 					{
2566 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2567 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2568 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2569 						vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2570 					}
2571 				}
2572 			}
2573 			else
2574 			{
2575 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2576 				{
2577 					if (templateVectorsBefore[i]->size())
2578 					{
2579 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2580 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2581 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2582 						vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2583 					}
2584 				}
2585 
2586 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2587 			}
2588 #endif
2589 		}
2590 		else
2591 		{
2592 			if (m_data.pushDescriptor)
2593 			{
2594 #ifndef CTS_USES_VULKANSC
2595 				if (writesBeforeBindVec.size())
2596 				{
2597 					vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2598 				}
2599 #endif
2600 			}
2601 			else
2602 			{
2603 				if (writesBeforeBindVec.size())
2604 				{
2605 					vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2606 				}
2607 
2608 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2609 			}
2610 		}
2611 	}
2612 
2613 #ifndef CTS_USES_VULKANSC
2614 	// For graphics pipeline library cases.
2615 	Move<VkPipeline> vertexInputLib;
2616 	Move<VkPipeline> preRasterShaderLib;
2617 	Move<VkPipeline> fragShaderLib;
2618 	Move<VkPipeline> fragOutputLib;
2619 #endif // CTS_USES_VULKANSC
2620 
2621 	Move<VkPipeline> pipeline;
2622 	Move<VkRenderPass> renderPass;
2623 	Move<VkFramebuffer> framebuffer;
2624 
2625 #ifndef CTS_USES_VULKANSC
2626 	BufferWithMemoryPtr				sbtBuffer;
2627 	const auto						sbtFlags		= (VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
2628 	VkStridedDeviceAddressRegionKHR	rgenSBTRegion	= makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2629 	VkStridedDeviceAddressRegionKHR	missSBTRegion	= makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2630 	VkStridedDeviceAddressRegionKHR	hitSBTRegion	= makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2631 	VkStridedDeviceAddressRegionKHR	callSBTRegion	= makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2632 	const auto						sgHandleSize	= rayTracingProperties.shaderGroupHandleSize;
2633 #endif // CTS_USES_VULKANSC
2634 
2635 	if (m_data.stage == STAGE_COMPUTE)
2636 	{
2637 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2638 
2639 		const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
2640 		{
2641 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType						sType;
2642 			nullptr,												// const void*							pNext;
2643 			static_cast<VkPipelineShaderStageCreateFlags>(0u),		// VkPipelineShaderStageCreateFlags		flags;
2644 			VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits				stage;
2645 			*shader,												// VkShaderModule						module;
2646 			"main",													// const char*							pName;
2647 			nullptr,												// const VkSpecializationInfo*			pSpecializationInfo;
2648 		};
2649 
2650 		VkComputePipelineCreateInfo pipelineCreateInfo =
2651 		{
2652 			VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,			// VkStructureType					sType;
2653 			nullptr,												// const void*						pNext;
2654 			static_cast<VkPipelineCreateFlags>(0u),					// VkPipelineCreateFlags			flags;
2655 			pipelineShaderStageParams,								// VkPipelineShaderStageCreateInfo	stage;
2656 			*pipelineLayout,										// VkPipelineLayout					layout;
2657 			DE_NULL,												// VkPipeline						basePipelineHandle;
2658 			0,														// deInt32							basePipelineIndex;
2659 		};
2660 
2661 #ifndef CTS_USES_VULKANSC
2662 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2663 		if (m_data.needsPipelineRobustness())
2664 		{
2665 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2666 			pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2667 		}
2668 #endif
2669 
2670 		pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
2671 
2672 	}
2673 #ifndef CTS_USES_VULKANSC
2674 	else if (m_data.stage == STAGE_RAYGEN)
2675 	{
2676 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2677 
2678 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2679 		{
2680 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2681 			nullptr,
2682 			0u,															// flags
2683 			VK_SHADER_STAGE_RAYGEN_BIT_KHR,								// stage
2684 			*shader,													// shader
2685 			"main",
2686 			nullptr,													// pSpecializationInfo
2687 		};
2688 
2689 		VkRayTracingShaderGroupCreateInfoKHR group =
2690 		{
2691 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR,
2692 			nullptr,
2693 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR,			// type
2694 			0,														// generalShader
2695 			VK_SHADER_UNUSED_KHR,									// closestHitShader
2696 			VK_SHADER_UNUSED_KHR,									// anyHitShader
2697 			VK_SHADER_UNUSED_KHR,									// intersectionShader
2698 			nullptr,												// pShaderGroupCaptureReplayHandle
2699 		};
2700 
2701 		VkRayTracingPipelineCreateInfoKHR pipelineCreateInfo = {
2702 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR,	// sType
2703 			nullptr,												// pNext
2704 			0u,														// flags
2705 			1u,														// stageCount
2706 			&shaderCreateInfo,										// pStages
2707 			1u,														// groupCount
2708 			&group,													// pGroups
2709 			0,														// maxRecursionDepth
2710 			nullptr,												// pLibraryInfo
2711 			nullptr,												// pLibraryInterface
2712 			nullptr,												// pDynamicState
2713 			*pipelineLayout,										// layout
2714 			(vk::VkPipeline)0,										// basePipelineHandle
2715 			0u,														// basePipelineIndex
2716 		};
2717 
2718 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2719 		if (m_data.needsPipelineRobustness())
2720 		{
2721 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2722 			pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2723 		}
2724 
2725 		pipeline = createRayTracingPipelineKHR(vk, device, VK_NULL_HANDLE, VK_NULL_HANDLE, &pipelineCreateInfo);
2726 
2727 		sbtBuffer = BufferWithMemoryPtr(new BufferWithMemory(
2728 			vk, device, allocator, makeBufferCreateInfo(sgHandleSize, sbtFlags), (MemoryRequirement::HostVisible | MemoryRequirement::DeviceAddress)));
2729 
2730 		deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2731 		invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2732 
2733 		vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, sgHandleSize, ptr);
2734 
2735 		const VkBufferDeviceAddressInfo deviceAddressInfo
2736 		{
2737 			VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,		// VkStructureType    sType
2738 			nullptr,											// const void*        pNext
2739 			sbtBuffer->get()									// VkBuffer           buffer;
2740 		};
2741 		const auto sbtAddress	= vk.getBufferDeviceAddress(device, &deviceAddressInfo);
2742 		rgenSBTRegion			= makeStridedDeviceAddressRegionKHR(sbtAddress, sgHandleSize, sgHandleSize);
2743 	}
2744 #endif
2745 	else
2746 	{
2747 		const VkSubpassDescription		subpassDesc				=
2748 		{
2749 			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
2750 			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
2751 			0u,																		// deUint32						inputAttachmentCount
2752 			DE_NULL,																// const VkAttachmentReference*	pInputAttachments
2753 			0u,																		// deUint32						colorAttachmentCount
2754 			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
2755 			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
2756 			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
2757 			0u,																		// deUint32						preserveAttachmentCount
2758 			DE_NULL																	// const deUint32*				pPreserveAttachments
2759 		};
2760 
2761 		const std::vector<VkSubpassDependency> subpassDependencies =
2762 		{
2763 			makeSubpassDependency
2764 			(
2765 				VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
2766 				0,												// deUint32				dstSubpass
2767 				VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
2768 				VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,			// VkPipelineStageFlags	dstStageMask
2769 				VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
2770 				VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT,	//	dstAccessMask
2771 				VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
2772 			),
2773 			makeSubpassDependency
2774 			(
2775 				0,
2776 				0,
2777 				VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
2778 				((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT : VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
2779 				VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
2780 				VK_ACCESS_SHADER_WRITE_BIT,
2781 				0u
2782 			),
2783 		};
2784 
2785 		const VkRenderPassCreateInfo	renderPassParams		=
2786 		{
2787 			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureTypei					sType
2788 			DE_NULL,												// const void*						pNext
2789 			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
2790 			0u,														// deUint32							attachmentCount
2791 			DE_NULL,												// const VkAttachmentDescription*	pAttachments
2792 			1u,														// deUint32							subpassCount
2793 			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
2794 			de::sizeU32(subpassDependencies),						// deUint32							dependencyCount
2795 			de::dataOrNull(subpassDependencies),					// const VkSubpassDependency*		pDependencies
2796 		};
2797 
2798 		renderPass = createRenderPass(vk, device, &renderPassParams);
2799 
2800 		const vk::VkFramebufferCreateInfo	framebufferParams	=
2801 		{
2802 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
2803 			DE_NULL,												// pNext
2804 			(vk::VkFramebufferCreateFlags)0,
2805 			*renderPass,											// renderPass
2806 			0u,														// attachmentCount
2807 			DE_NULL,												// pAttachments
2808 			DIM,													// width
2809 			DIM,													// height
2810 			1u,														// layers
2811 		};
2812 
2813 		framebuffer = createFramebuffer(vk, device, &framebufferParams);
2814 
2815 		const VkVertexInputBindingDescription			vertexInputBindingDescription		=
2816 		{
2817 			0u,								// deUint32			 binding
2818 			(deUint32)formatBytes,			// deUint32			 stride
2819 			VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate
2820 		};
2821 
2822 		const VkVertexInputAttributeDescription			vertexInputAttributeDescription		=
2823 		{
2824 			0u,								// deUint32	location
2825 			0u,								// deUint32	binding
2826 			m_data.format,					// VkFormat	format
2827 			0u								// deUint32	offset
2828 		};
2829 
2830 		deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2831 
2832 		VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
2833 		{
2834 			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2835 			DE_NULL,													// const void*								pNext;
2836 			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
2837 			numAttribs,													// deUint32									vertexBindingDescriptionCount;
2838 			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2839 			numAttribs,													// deUint32									vertexAttributeDescriptionCount;
2840 			&vertexInputAttributeDescription							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2841 		};
2842 
2843 		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
2844 		{
2845 			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
2846 			DE_NULL,														// const void*								pNext;
2847 			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
2848 			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
2849 			VK_FALSE														// VkBool32									primitiveRestartEnable;
2850 		};
2851 
2852 		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
2853 		{
2854 			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
2855 			DE_NULL,														// const void*								pNext;
2856 			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
2857 			VK_FALSE,														// VkBool32									depthClampEnable;
2858 			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
2859 			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
2860 			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
2861 			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
2862 			VK_FALSE,														// VkBool32									depthBiasEnable;
2863 			0.0f,															// float									depthBiasConstantFactor;
2864 			0.0f,															// float									depthBiasClamp;
2865 			0.0f,															// float									depthBiasSlopeFactor;
2866 			1.0f															// float									lineWidth;
2867 		};
2868 
2869 		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
2870 		{
2871 			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
2872 			DE_NULL,													// const void*								pNext
2873 			0u,															// VkPipelineMultisampleStateCreateFlags	flags
2874 			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
2875 			VK_FALSE,													// VkBool32									sampleShadingEnable
2876 			1.0f,														// float									minSampleShading
2877 			DE_NULL,													// const VkSampleMask*						pSampleMask
2878 			VK_FALSE,													// VkBool32									alphaToCoverageEnable
2879 			VK_FALSE													// VkBool32									alphaToOneEnable
2880 		};
2881 
2882 		VkViewport viewport = makeViewport(DIM, DIM);
2883 		VkRect2D scissor = makeRect2D(DIM, DIM);
2884 
2885 		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
2886 		{
2887 			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
2888 			DE_NULL,												// const void*								pNext
2889 			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
2890 			1u,														// deUint32									viewportCount
2891 			&viewport,												// const VkViewport*						pViewports
2892 			1u,														// deUint32									scissorCount
2893 			&scissor												// const VkRect2D*							pScissors
2894 		};
2895 
2896 		Move<VkShaderModule> fs;
2897 		Move<VkShaderModule> vs;
2898 
2899 		deUint32 numStages;
2900 		if (m_data.stage == STAGE_VERTEX)
2901 		{
2902 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2903 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus
2904 			numStages = 1u;
2905 		}
2906 		else
2907 		{
2908 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2909 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2910 			numStages = 2u;
2911 		}
2912 
2913 		VkPipelineShaderStageCreateInfo	shaderCreateInfo[2] =
2914 		{
2915 			{
2916 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2917 				DE_NULL,
2918 				(VkPipelineShaderStageCreateFlags)0,
2919 				VK_SHADER_STAGE_VERTEX_BIT,									// stage
2920 				*vs,														// shader
2921 				"main",
2922 				DE_NULL,													// pSpecializationInfo
2923 			},
2924 			{
2925 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2926 				DE_NULL,
2927 				(VkPipelineShaderStageCreateFlags)0,
2928 				VK_SHADER_STAGE_FRAGMENT_BIT,								// stage
2929 				*fs,														// shader
2930 				"main",
2931 				DE_NULL,													// pSpecializationInfo
2932 			}
2933 		};
2934 
2935 		// Base structure with everything for the monolithic case.
2936 		VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
2937 		{
2938 			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
2939 			nullptr,											// const void*										pNext;
2940 			0u,													// VkPipelineCreateFlags							flags;
2941 			numStages,											// deUint32											stageCount;
2942 			&shaderCreateInfo[0],								// const VkPipelineShaderStageCreateInfo*			pStages;
2943 			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
2944 			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
2945 			nullptr,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
2946 			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
2947 			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
2948 			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
2949 			nullptr,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
2950 			nullptr,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
2951 			nullptr,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
2952 			pipelineLayout.get(),								// VkPipelineLayout									layout;
2953 			renderPass.get(),									// VkRenderPass										renderPass;
2954 			0u,													// deUint32											subpass;
2955 			VK_NULL_HANDLE,										// VkPipeline										basePipelineHandle;
2956 			0													// int												basePipelineIndex;
2957 		};
2958 
2959 #ifndef CTS_USES_VULKANSC
2960 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2961 		if (m_data.needsPipelineRobustness())
2962 		{
2963 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2964 
2965 			if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_MONOLITHIC)
2966 			{
2967 				if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2968 					graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2969 				else if (m_data.stage == STAGE_VERTEX)
2970 					shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
2971 				else
2972 					shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
2973 			}
2974 			else // Fast or Optimized graphics pipeline libraries.
2975 			{
2976 				VkPipelineCreateFlags libCreationFlags	= VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
2977 				VkPipelineCreateFlags linkFlags			= 0u;
2978 
2979 				if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
2980 				{
2981 					libCreationFlags	|= VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT;
2982 					linkFlags			|= VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT;
2983 				}
2984 
2985 				// Vertex input state library. When testing the robust vertex shaders, this will be merged with it in the same library.
2986 				if (m_data.stage != STAGE_VERTEX || m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2987 				{
2988 					VkGraphicsPipelineLibraryCreateInfoEXT	vertexInputLibInfo		= initVulkanStructure();
2989 					VkGraphicsPipelineCreateInfo			vertexInputPipelineInfo	= initVulkanStructure();
2990 
2991 					vertexInputPipelineInfo.pNext = &vertexInputLibInfo;
2992 					if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2993 						vertexInputLibInfo.pNext = &pipelineRobustnessInfo;
2994 
2995 					vertexInputLibInfo.flags						|= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
2996 					vertexInputPipelineInfo.flags					= libCreationFlags;
2997 					vertexInputPipelineInfo.pVertexInputState		= graphicsPipelineCreateInfo.pVertexInputState;
2998 					vertexInputPipelineInfo.pInputAssemblyState		= graphicsPipelineCreateInfo.pInputAssemblyState;
2999 
3000 					vertexInputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &vertexInputPipelineInfo);
3001 				}
3002 
3003 				// Pre-rasterization shader state library.
3004 				{
3005 					VkGraphicsPipelineLibraryCreateInfoEXT preRasterShaderLibInfo	= initVulkanStructure();
3006 					preRasterShaderLibInfo.flags									|= VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT;
3007 
3008 					VkGraphicsPipelineCreateInfo preRasterShaderPipelineInfo	= initVulkanStructure(&preRasterShaderLibInfo);
3009 					preRasterShaderPipelineInfo.flags							= libCreationFlags;
3010 					preRasterShaderPipelineInfo.layout							= graphicsPipelineCreateInfo.layout;
3011 					preRasterShaderPipelineInfo.pViewportState					= graphicsPipelineCreateInfo.pViewportState;
3012 					preRasterShaderPipelineInfo.pRasterizationState				= graphicsPipelineCreateInfo.pRasterizationState;
3013 					preRasterShaderPipelineInfo.pTessellationState				= graphicsPipelineCreateInfo.pTessellationState;
3014 					preRasterShaderPipelineInfo.renderPass						= graphicsPipelineCreateInfo.renderPass;
3015 					preRasterShaderPipelineInfo.subpass							= graphicsPipelineCreateInfo.subpass;
3016 
3017 					VkPipelineShaderStageCreateInfo vertexStageInfo = shaderCreateInfo[0];
3018 					if (m_data.stage == STAGE_VERTEX && m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3019 					{
3020 						preRasterShaderPipelineInfo.pVertexInputState	= graphicsPipelineCreateInfo.pVertexInputState;
3021 						preRasterShaderPipelineInfo.pInputAssemblyState	= graphicsPipelineCreateInfo.pInputAssemblyState;
3022 						preRasterShaderLibInfo.flags					|= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
3023 						vertexStageInfo.pNext							= &pipelineRobustnessInfo;
3024 					}
3025 
3026 					preRasterShaderPipelineInfo.stageCount	= 1u;
3027 					preRasterShaderPipelineInfo.pStages		= &vertexStageInfo;
3028 
3029 					preRasterShaderLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &preRasterShaderPipelineInfo);
3030 				}
3031 
3032 				// Fragment shader stage library.
3033 				{
3034 					VkGraphicsPipelineLibraryCreateInfoEXT fragShaderLibInfo	= initVulkanStructure();
3035 					fragShaderLibInfo.flags										|= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
3036 
3037 					VkGraphicsPipelineCreateInfo fragShaderPipelineInfo	= initVulkanStructure(&fragShaderLibInfo);
3038 					fragShaderPipelineInfo.flags						= libCreationFlags;
3039 					fragShaderPipelineInfo.layout						= graphicsPipelineCreateInfo.layout;
3040 					fragShaderPipelineInfo.pMultisampleState			= graphicsPipelineCreateInfo.pMultisampleState;
3041 					fragShaderPipelineInfo.pDepthStencilState			= graphicsPipelineCreateInfo.pDepthStencilState;
3042 					fragShaderPipelineInfo.renderPass					= graphicsPipelineCreateInfo.renderPass;
3043 					fragShaderPipelineInfo.subpass						= graphicsPipelineCreateInfo.subpass;
3044 
3045 					std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
3046 					if (m_data.stage != STAGE_VERTEX)
3047 					{
3048 						shaderStages.push_back(shaderCreateInfo[1]);
3049 						if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3050 							shaderStages.back().pNext = &pipelineRobustnessInfo;
3051 					}
3052 
3053 					fragShaderPipelineInfo.stageCount	= de::sizeU32(shaderStages);
3054 					fragShaderPipelineInfo.pStages		= de::dataOrNull(shaderStages);
3055 
3056 					fragShaderLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragShaderPipelineInfo);
3057 				}
3058 
3059 				// Fragment output library.
3060 				{
3061 					VkGraphicsPipelineLibraryCreateInfoEXT fragOutputLibInfo	= initVulkanStructure();
3062 					fragOutputLibInfo.flags										|= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
3063 
3064 					VkGraphicsPipelineCreateInfo fragOutputPipelineInfo	= initVulkanStructure(&fragOutputLibInfo);
3065 					fragOutputPipelineInfo.flags						= libCreationFlags;
3066 					fragOutputPipelineInfo.pColorBlendState				= graphicsPipelineCreateInfo.pColorBlendState;
3067 					fragOutputPipelineInfo.renderPass					= graphicsPipelineCreateInfo.renderPass;
3068 					fragOutputPipelineInfo.subpass						= graphicsPipelineCreateInfo.subpass;
3069 					fragOutputPipelineInfo.pMultisampleState			= graphicsPipelineCreateInfo.pMultisampleState;
3070 
3071 					fragOutputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragOutputPipelineInfo);
3072 				}
3073 
3074 				// Linked pipeline.
3075 				std::vector<VkPipeline> libraryHandles;
3076 				if (*vertexInputLib		!= VK_NULL_HANDLE) libraryHandles.push_back(*vertexInputLib);
3077 				if (*preRasterShaderLib	!= VK_NULL_HANDLE) libraryHandles.push_back(*preRasterShaderLib);
3078 				if (*fragShaderLib		!= VK_NULL_HANDLE) libraryHandles.push_back(*fragShaderLib);
3079 				if (*fragOutputLib		!= VK_NULL_HANDLE) libraryHandles.push_back(*fragOutputLib);
3080 
3081 				VkPipelineLibraryCreateInfoKHR linkedPipelineLibraryInfo	= initVulkanStructure();
3082 				linkedPipelineLibraryInfo.libraryCount						= de::sizeU32(libraryHandles);
3083 				linkedPipelineLibraryInfo.pLibraries						= de::dataOrNull(libraryHandles);
3084 
3085 				VkGraphicsPipelineCreateInfo linkedPipelineInfo	= initVulkanStructure(&linkedPipelineLibraryInfo);
3086 				linkedPipelineInfo.flags						= linkFlags;
3087 				linkedPipelineInfo.layout						= graphicsPipelineCreateInfo.layout;
3088 
3089 				pipeline = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &linkedPipelineInfo);
3090 			}
3091 		}
3092 #endif
3093 		if (*pipeline == VK_NULL_HANDLE)
3094 			pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
3095 	}
3096 
3097 	const VkImageMemoryBarrier imageBarrier =
3098 	{
3099 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
3100 		DE_NULL,											// const void*			pNext
3101 		0u,													// VkAccessFlags		srcAccessMask
3102 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
3103 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
3104 		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
3105 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
3106 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
3107 		**images[0],										// VkImage				image
3108 		{
3109 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
3110 			0u,										// uint32_t				baseMipLevel
3111 			1u,										// uint32_t				mipLevels,
3112 			0u,										// uint32_t				baseArray
3113 			1u,										// uint32_t				arraySize
3114 		}
3115 	};
3116 
3117 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
3118 							(VkDependencyFlags)0,
3119 							0, (const VkMemoryBarrier*)DE_NULL,
3120 							0, (const VkBufferMemoryBarrier*)DE_NULL,
3121 							1, &imageBarrier);
3122 
3123 	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
3124 
3125 	if (!formatIsR64(m_data.format))
3126 	{
3127 		VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3128 		VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
3129 
3130 		vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
3131 	}
3132 	else
3133 	{
3134 		const vector<VkBufferImageCopy>	bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
3135 		copyBufferToImage(vk,
3136 			*cmdBuffer,
3137 			*(*bufferOutputImageR64),
3138 			sizeOutputR64,
3139 			bufferImageCopy,
3140 			VK_IMAGE_ASPECT_COLOR_BIT,
3141 			1,
3142 			1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
3143 	}
3144 
3145 	VkMemoryBarrier					memBarrier =
3146 	{
3147 		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
3148 		DE_NULL,							// pNext
3149 		0u,									// srcAccessMask
3150 		0u,									// dstAccessMask
3151 	};
3152 
3153 	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3154 	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
3155 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
3156 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3157 
3158 	if (m_data.stage == STAGE_COMPUTE)
3159 	{
3160 		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3161 	}
3162 #ifndef CTS_USES_VULKANSC
3163 	else if (m_data.stage == STAGE_RAYGEN)
3164 	{
3165 		vk.cmdTraceRaysKHR(*cmdBuffer,
3166 			&rgenSBTRegion,
3167 			&missSBTRegion,
3168 			&hitSBTRegion,
3169 			&callSBTRegion,
3170 			DIM, DIM, 1u);
3171 	}
3172 #endif
3173 	else
3174 	{
3175 		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
3176 						makeRect2D(DIM, DIM),
3177 						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
3178 		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3179 		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3180 		{
3181 			VkDeviceSize zeroOffset = 0;
3182 			VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3183 			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3184 			vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3185 
3186 			// This barrier corresponds to the second subpass dependency.
3187 			const auto writeStage = ((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT : VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
3188 			const auto postDrawBarrier = makeMemoryBarrier(VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT);
3189 			cmdPipelineMemoryBarrier(vk, *cmdBuffer, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, writeStage, &postDrawBarrier);
3190 		}
3191 		if (m_data.stage == STAGE_VERTEX)
3192 		{
3193 			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
3194 		}
3195 		else
3196 		{
3197 			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3198 		}
3199 		endRenderPass(vk, *cmdBuffer);
3200 	}
3201 
3202 	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3203 	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3204 	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
3205 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3206 
3207 	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
3208 															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3209 	vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
3210 
3211 	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3212 	memBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
3213 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
3214 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3215 
3216 	endCommandBuffer(vk, *cmdBuffer);
3217 
3218 	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3219 
3220 	void *ptr = copyBuffer->getAllocation().getHostPtr();
3221 
3222 	invalidateAlloc(vk, device, copyBuffer->getAllocation());
3223 
3224 	qpTestResult res = QP_TEST_RESULT_PASS;
3225 
3226 	for (deUint32 i = 0; i < DIM*DIM; ++i)
3227 	{
3228 		if (formatIsFloat(m_data.format))
3229 		{
3230 			if (((float *)ptr)[i * numComponents] != 1.0f)
3231 			{
3232 				res = QP_TEST_RESULT_FAIL;
3233 			}
3234 		}
3235 		else if (formatIsR64(m_data.format))
3236 		{
3237 			if (((deUint64 *)ptr)[i * numComponents] != 1)
3238 			{
3239 				res = QP_TEST_RESULT_FAIL;
3240 			}
3241 		}
3242 		else
3243 		{
3244 			if (((deUint32 *)ptr)[i * numComponents] != 1)
3245 			{
3246 				res = QP_TEST_RESULT_FAIL;
3247 			}
3248 		}
3249 	}
3250 
3251 	return tcu::TestStatus(res, qpGetTestResultName(res));
3252 }
3253 
3254 // Out of bounds stride tests.
3255 //
3256 // The goal is checking the following situation:
3257 //
3258 // - The vertex buffer size is not a multiple of the vertex binding stride.
3259 //     - In other words, the last chunk goes partially beyond the end of the buffer.
3260 // - However, in this last chunk there will be an attribute that will be completely inside the buffer's range.
3261 // - With robustBufferAccess2, the implementation has to consider the attribute in-bounds and use it properly.
3262 // - Without robustBufferAccess2, the implementation is allowed to work at the chunk level instead of the attribute level.
3263 //     - In other words, it can consider the attribute out of bounds because the chunk is out of bounds.
3264 //
3265 // The test will try to check robustBufferAccess2 is correctly applied here.
3266 
3267 struct OutOfBoundsStrideParams
3268 {
3269 	const bool pipelineRobustness;
3270 	const bool dynamicStride;
3271 
OutOfBoundsStrideParamsvkt::robustness::__anon28606::OutOfBoundsStrideParams3272 	OutOfBoundsStrideParams (const bool pipelineRobustness_, const bool dynamicStride_)
3273 		: pipelineRobustness	(pipelineRobustness_)
3274 		, dynamicStride			(dynamicStride_)
3275 		{}
3276 };
3277 
3278 class OutOfBoundsStrideInstance : public vkt::TestInstance
3279 {
3280 public:
OutOfBoundsStrideInstance(Context& context, const OutOfBoundsStrideParams& params)3281 					OutOfBoundsStrideInstance	(Context& context, const OutOfBoundsStrideParams& params)
3282 						: vkt::TestInstance	(context)
3283 						, m_params			(params)
3284 						{}
~OutOfBoundsStrideInstance(void)3285 	virtual			~OutOfBoundsStrideInstance	(void) {}
3286 
3287 	tcu::TestStatus	iterate							(void) override;
3288 
3289 protected:
3290 	const OutOfBoundsStrideParams m_params;
3291 };
3292 
3293 class OutOfBoundsStrideCase : public vkt::TestCase
3294 {
3295 public:
3296 					OutOfBoundsStrideCase	(tcu::TestContext& testCtx, const std::string& name, const OutOfBoundsStrideParams& params);
~OutOfBoundsStrideCase(void)3297 	virtual			~OutOfBoundsStrideCase	(void) {}
3298 
3299 	void			initPrograms			(vk::SourceCollections& programCollection) const override;
3300 	TestInstance*	createInstance			(Context& context) const override { return new OutOfBoundsStrideInstance(context, m_params); }
3301 	void			checkSupport			(Context& context) const override;
3302 
3303 protected:
3304 	const OutOfBoundsStrideParams m_params;
3305 };
3306 
OutOfBoundsStrideCase(tcu::TestContext& testCtx, const std::string& name, const OutOfBoundsStrideParams& params)3307 OutOfBoundsStrideCase::OutOfBoundsStrideCase (tcu::TestContext& testCtx, const std::string& name, const OutOfBoundsStrideParams& params)
3308 	: vkt::TestCase		(testCtx, name)
3309 	, m_params			(params)
3310 {
3311 #ifdef CTS_USES_VULKANSC
3312 	DE_ASSERT(!m_params.pipelineRobustness);
3313 #endif // CTS_USES_VULKANSC
3314 }
3315 
checkSupport(Context &context) const3316 void OutOfBoundsStrideCase::checkSupport(Context &context) const
3317 {
3318 	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
3319 
3320 	const auto&	vki				= context.getInstanceInterface();
3321 	const auto	physicalDevice	= context.getPhysicalDevice();
3322 
3323 	// We need to query feature support using the physical device instead of using the reported context features because robustness
3324 	// features are disabled in the default device.
3325 	VkPhysicalDeviceFeatures2							features2					= initVulkanStructure();
3326 	VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features			= initVulkanStructure();
3327 #ifndef CTS_USES_VULKANSC
3328 	VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures	= initVulkanStructure();
3329 #endif // CTS_USES_VULKANSC
3330 	VkPhysicalDeviceExtendedDynamicStateFeaturesEXT		edsFeatures					= initVulkanStructure();
3331 
3332 	const auto addFeatures = makeStructChainAdder(&features2);
3333 
3334 	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
3335 		addFeatures(&robustness2Features);
3336 
3337 #ifndef CTS_USES_VULKANSC
3338 	if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
3339 		addFeatures(&pipelineRobustnessFeatures);
3340 #endif // CTS_USES_VULKANSC
3341 
3342 	if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
3343 		addFeatures(&edsFeatures);
3344 
3345 	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
3346 
3347 	if (!robustness2Features.robustBufferAccess2)
3348 		TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
3349 
3350 #ifndef CTS_USES_VULKANSC
3351 	if (m_params.pipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
3352 		TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
3353 #endif // CTS_USES_VULKANSC
3354 
3355 	if (m_params.dynamicStride && !edsFeatures.extendedDynamicState)
3356 		TCU_THROW(NotSupportedError, "extendedDynamicState not supported");
3357 }
3358 
initPrograms(vk::SourceCollections& programCollection) const3359 void OutOfBoundsStrideCase::initPrograms (vk::SourceCollections& programCollection) const
3360 {
3361 	std::ostringstream vert;
3362 	vert
3363 		<< "#version 460\n"
3364 		<< "layout (location=0) in vec4 inPos;\n"
3365 		<< "void main (void) {\n"
3366 		<< "    gl_Position = inPos;\n"
3367 		<< "    gl_PointSize = 1.0;\n"
3368 		<< "}\n"
3369 		;
3370 	programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
3371 
3372 	std::ostringstream frag;
3373 	frag
3374 		<< "#version 460\n"
3375 		<< "layout (location=0) out vec4 outColor;\n"
3376 		<< "void main (void) {\n"
3377 		<< "    outColor = vec4(0.0, 0.0, 1.0, 1.0);\n"
3378 		<< "}\n"
3379 		;
3380 	programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
3381 }
3382 
iterate(void)3383 tcu::TestStatus OutOfBoundsStrideInstance::iterate (void)
3384 {
3385 	const auto&			vki				= m_context.getInstanceInterface();
3386 	const auto			physicalDevice	= m_context.getPhysicalDevice();
3387 	const auto&			vkd				= getDeviceInterface(m_context, true, m_params.pipelineRobustness);
3388 	const auto			device			= getLogicalDevice(m_context, true, m_params.pipelineRobustness);
3389 	SimpleAllocator		allocator		(vkd, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
3390 	const auto			qfIndex			= m_context.getUniversalQueueFamilyIndex();
3391 	const tcu::IVec3	fbDim			(8, 8, 1);
3392 	const auto			fbExtent		= makeExtent3D(fbDim);
3393 	const auto			colorFormat		= VK_FORMAT_R8G8B8A8_UNORM;
3394 	const auto			colorUsage		= (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
3395 	const auto			colorSRR		= makeDefaultImageSubresourceRange();
3396 	const auto			colorSRL		= makeDefaultImageSubresourceLayers();
3397 	const auto			v4Size			= static_cast<uint32_t>(sizeof(tcu::Vec4));
3398 	VkQueue				queue;
3399 
3400 	// Retrieve queue manually.
3401 	vkd.getDeviceQueue(device, qfIndex, 0u, &queue);
3402 
3403 	// Color buffer for the test.
3404 	ImageWithBuffer colorBuffer(vkd, device, allocator, fbExtent, colorFormat, colorUsage, VK_IMAGE_TYPE_2D);
3405 
3406 	// We will use points, one point per pixel, but we'll insert a padding after each point.
3407 	// We'll make the last padding out of the buffer, but the point itself will be inside the buffer.
3408 
3409 	// One point per pixel.
3410 	const auto				pointCount = fbExtent.width * fbExtent.height * fbExtent.depth;
3411 	std::vector<tcu::Vec4>	points;
3412 
3413 	points.reserve(pointCount);
3414 	for (uint32_t y = 0u; y < fbExtent.height; ++y)
3415 		for (uint32_t x = 0u; x < fbExtent.width; ++x)
3416 		{
3417 			const auto			xCoord		= ((static_cast<float>(x) + 0.5f) / static_cast<float>(fbExtent.width))  * 2.0f - 1.0f;
3418 			const auto			yCoord		= ((static_cast<float>(y) + 0.5f) / static_cast<float>(fbExtent.height)) * 2.0f - 1.0f;
3419 			const tcu::Vec4		coords		(xCoord, yCoord, 0.0f, 1.0f);
3420 
3421 			points.push_back(coords);
3422 		}
3423 
3424 	// Add paddings.
3425 	std::vector<tcu::Vec4> vertexBufferData;
3426 	vertexBufferData.reserve(points.size() * 2u);
3427 	for (const auto& point : points)
3428 	{
3429 		vertexBufferData.push_back(point);
3430 		vertexBufferData.push_back(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3431 	}
3432 
3433 	// Prepare vertex buffer. Note the size is slightly short and excludes the last padding.
3434 	const auto vertexBufferSize		= static_cast<VkDeviceSize>(de::dataSize(vertexBufferData) - v4Size);
3435 	const auto vertexBufferUsage	= (VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
3436 	const auto vertexBufferInfo		= makeBufferCreateInfo(vertexBufferSize, vertexBufferUsage);
3437 	const auto vertexBufferOffset	= VkDeviceSize{0};
3438 	const auto vertexBufferStride	= static_cast<VkDeviceSize>(2u * v4Size);
3439 
3440 	BufferWithMemory vertexBuffer(vkd, device, allocator, vertexBufferInfo, MemoryRequirement::HostVisible);
3441 	auto& vertexBufferAlloc	= vertexBuffer.getAllocation();
3442 	void* vertexBufferPtr	= vertexBufferAlloc.getHostPtr();
3443 
3444 	deMemcpy(vertexBufferPtr, de::dataOrNull(vertexBufferData), static_cast<size_t>(vertexBufferSize));
3445 
3446 	// Create the pipeline.
3447 	const auto&	binaries		= m_context.getBinaryCollection();
3448 	const auto	vertModule		= createShaderModule(vkd, device, binaries.get("vert"));
3449 	const auto	fragModule		= createShaderModule(vkd, device, binaries.get("frag"));
3450 	const auto	renderPass		= makeRenderPass(vkd, device, colorFormat);
3451 	const auto	framebuffer		= makeFramebuffer(vkd, device, renderPass.get(), colorBuffer.getImageView(), fbExtent.width, fbExtent.height);
3452 	const auto	pipelineLayout	= makePipelineLayout(vkd, device);
3453 
3454 	const std::vector<VkViewport>	viewports	(1u, makeViewport(fbExtent));
3455 	const std::vector<VkRect2D>		scissors	(1u, makeRect2D(fbExtent));
3456 
3457 	// Input state, which contains the right stride.
3458 	const auto bindingStride		= v4Size * 2u; // Vertex and padding.
3459 	const auto bindingDescription	= makeVertexInputBindingDescription(0u, bindingStride, VK_VERTEX_INPUT_RATE_VERTEX);
3460 	const auto attributeDescription	= makeVertexInputAttributeDescription(0u, 0u, vk::VK_FORMAT_R32G32B32A32_SFLOAT, 0u); // Vertex at the start of each item.
3461 
3462 	const VkPipelineVertexInputStateCreateInfo inputStateCreateInfo =
3463 	{
3464 		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	//	VkStructureType								sType;
3465 		nullptr,													//	const void*									pNext;
3466 		0u,															//	VkPipelineVertexInputStateCreateFlags		flags;
3467 		1u,															//	uint32_t									vertexBindingDescriptionCount;
3468 		&bindingDescription,										//	const VkVertexInputBindingDescription*		pVertexBindingDescriptions;
3469 		1u,															//	uint32_t									vertexAttributeDescriptionCount;
3470 		&attributeDescription,										//	const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
3471 	};
3472 
3473 	std::vector<VkDynamicState> dynamicStates;
3474 	if (m_params.dynamicStride)
3475 		dynamicStates.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
3476 
3477 	const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo =
3478 	{
3479 		VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,	//	VkStructureType						sType;
3480 		nullptr,												//	const void*							pNext;
3481 		0u,														//	VkPipelineDynamicStateCreateFlags	flags;
3482 		de::sizeU32(dynamicStates),								//	uint32_t							dynamicStateCount;
3483 		de::dataOrNull(dynamicStates),							//	const VkDynamicState*				pDynamicStates;
3484 	};
3485 
3486 	const auto pipeline = makeGraphicsPipeline(vkd, device, pipelineLayout.get(),
3487 		vertModule.get(), VK_NULL_HANDLE, VK_NULL_HANDLE, VK_NULL_HANDLE, fragModule.get(),
3488 		renderPass.get(), viewports, scissors, VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, 0u,
3489 		&inputStateCreateInfo, nullptr, nullptr, nullptr, nullptr, &dynamicStateCreateInfo);
3490 
3491 	// Command pool and buffer.
3492 	const CommandPoolWithBuffer	cmd			(vkd, device, qfIndex);
3493 	const auto					cmdBuffer	= cmd.cmdBuffer.get();
3494 
3495 	const auto clearColor = makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3496 	beginCommandBuffer(vkd, cmdBuffer);
3497 	beginRenderPass(vkd, cmdBuffer, renderPass.get(), framebuffer.get(), scissors.at(0u), clearColor);
3498 	if (m_params.dynamicStride)
3499 	{
3500 #ifndef CTS_USES_VULKANSC
3501 		vkd.cmdBindVertexBuffers2(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr, &vertexBufferStride);
3502 #else
3503 		vkd.cmdBindVertexBuffers2EXT(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr, &vertexBufferStride);
3504 #endif // CTS_USES_VULKANSC
3505 	}
3506 	else
3507 	{
3508 		vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
3509 	}
3510 	vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.get());
3511 	vkd.cmdDraw(cmdBuffer, pointCount, 1u, 0u, 0u);
3512 	endRenderPass(vkd, cmdBuffer);
3513 
3514 	// Copy image to verification buffer.
3515 	const auto color2Transfer = makeImageMemoryBarrier(
3516 		VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
3517 		VK_ACCESS_TRANSFER_READ_BIT,
3518 		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
3519 		VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3520 		colorBuffer.getImage(), colorSRR);
3521 
3522 	cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &color2Transfer);
3523 
3524 	const auto copyRegion = makeBufferImageCopy(fbExtent, colorSRL);
3525 	vkd.cmdCopyImageToBuffer(cmdBuffer, colorBuffer.getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorBuffer.getBuffer(), 1u, &copyRegion);
3526 
3527 	const auto transfer2Host = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
3528 	cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, &transfer2Host);
3529 
3530 	endCommandBuffer(vkd, cmdBuffer);
3531 	submitCommandsAndWait(vkd, device, queue, cmdBuffer);
3532 
3533 	// Verify color buffer.
3534 	invalidateAlloc(vkd, device, colorBuffer.getBufferAllocation());
3535 
3536 	const tcu::Vec4						refColor		(0.0f, 0.0f, 1.0f, 1.0f); // Must match frag shader.
3537 	const tcu::Vec4						threshold		(0.0f, 0.0f, 0.0f, 0.0f);
3538 	const void*							resultData		= colorBuffer.getBufferAllocation().getHostPtr();
3539 	const auto							tcuFormat		= mapVkFormat(colorFormat);
3540 	const tcu::ConstPixelBufferAccess	resultAccess	(tcuFormat, fbDim, resultData);
3541 	auto&								log				= m_context.getTestContext().getLog();
3542 
3543 	if (!tcu::floatThresholdCompare(log, "Result", "", refColor, resultAccess, threshold, tcu::COMPARE_LOG_ON_ERROR))
3544 		return tcu::TestStatus::fail("Unexpected results in the color buffer -- check log for details");
3545 
3546 	return tcu::TestStatus::pass("Pass");
3547 }
3548 
getGPLSuffix(PipelineRobustnessCase prCase)3549 std::string getGPLSuffix (PipelineRobustnessCase prCase)
3550 {
3551 	if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
3552 		return "_fast_gpl";
3553 	if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
3554 		return "_optimized_gpl";
3555 	return "";
3556 }
3557 
3558 }	// anonymous
3559 
createTests(tcu::TestCaseGroup* group, bool robustness2, bool pipelineRobustness)3560 static void createTests (tcu::TestCaseGroup* group, bool robustness2, bool pipelineRobustness)
3561 {
3562 	tcu::TestContext& testCtx = group->getTestContext();
3563 
3564 	typedef struct
3565 	{
3566 		deUint32				count;
3567 		const char*				name;
3568 	} TestGroupCase;
3569 
3570 	TestGroupCase fmtCases[] =
3571 	{
3572 		{ VK_FORMAT_R32_SINT,				"r32i"},
3573 		{ VK_FORMAT_R32_UINT,				"r32ui"},
3574 		{ VK_FORMAT_R32_SFLOAT,				"r32f"},
3575 		{ VK_FORMAT_R32G32_SINT,			"rg32i"},
3576 		{ VK_FORMAT_R32G32_UINT,			"rg32ui"},
3577 		{ VK_FORMAT_R32G32_SFLOAT,			"rg32f"},
3578 		{ VK_FORMAT_R32G32B32A32_SINT,		"rgba32i"},
3579 		{ VK_FORMAT_R32G32B32A32_UINT,		"rgba32ui"},
3580 		{ VK_FORMAT_R32G32B32A32_SFLOAT,	"rgba32f"},
3581 		{ VK_FORMAT_R64_SINT,				"r64i"},
3582 		{ VK_FORMAT_R64_UINT,				"r64ui"},
3583 	};
3584 
3585 	TestGroupCase fullDescCases[] =
3586 	{
3587 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,				"uniform_buffer"},
3588 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,				"storage_buffer"},
3589 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,		"uniform_buffer_dynamic"},
3590 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,		"storage_buffer_dynamic"},
3591 		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,			"uniform_texel_buffer"},
3592 		{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,			"storage_texel_buffer"},
3593 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image"},
3594 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image"},
3595 		{ VERTEX_ATTRIBUTE_FETCH,							"vertex_attribute_fetch"},
3596 	};
3597 
3598 	TestGroupCase imgDescCases[] =
3599 	{
3600 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image"},
3601 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image"},
3602 	};
3603 
3604 	TestGroupCase fullLenCases32Bit[] =
3605 	{
3606 		{ ~0U,			"null_descriptor"},
3607 		{ 0,			"img"},
3608 		{ 4,			"len_4"},
3609 		{ 8,			"len_8"},
3610 		{ 12,			"len_12"},
3611 		{ 16,			"len_16"},
3612 		{ 20,			"len_20"},
3613 		{ 31,			"len_31"},
3614 		{ 32,			"len_32"},
3615 		{ 33,			"len_33"},
3616 		{ 35,			"len_35"},
3617 		{ 36,			"len_36"},
3618 		{ 39,			"len_39"},
3619 		{ 40,			"len_41"},
3620 		{ 252,			"len_252"},
3621 		{ 256,			"len_256"},
3622 		{ 260,			"len_260"},
3623 	};
3624 
3625 	TestGroupCase fullLenCases64Bit[] =
3626 	{
3627 		{ ~0U,			"null_descriptor"},
3628 		{ 0,			"img"},
3629 		{ 8,			"len_8"},
3630 		{ 16,			"len_16"},
3631 		{ 24,			"len_24"},
3632 		{ 32,			"len_32"},
3633 		{ 40,			"len_40"},
3634 		{ 62,			"len_62"},
3635 		{ 64,			"len_64"},
3636 		{ 66,			"len_66"},
3637 		{ 70,			"len_70"},
3638 		{ 72,			"len_72"},
3639 		{ 78,			"len_78"},
3640 		{ 80,			"len_80"},
3641 		{ 504,			"len_504"},
3642 		{ 512,			"len_512"},
3643 		{ 520,			"len_520"},
3644 	};
3645 
3646 	TestGroupCase imgLenCases[] =
3647 	{
3648 		{ 0,	"img"},
3649 	};
3650 
3651 	TestGroupCase viewCases[] =
3652 	{
3653 		{ VK_IMAGE_VIEW_TYPE_1D,			"1d"},
3654 		{ VK_IMAGE_VIEW_TYPE_2D,			"2d"},
3655 		{ VK_IMAGE_VIEW_TYPE_3D,			"3d"},
3656 		{ VK_IMAGE_VIEW_TYPE_CUBE,			"cube"},
3657 		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		"1d_array"},
3658 		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		"2d_array"},
3659 		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	"cube_array"},
3660 	};
3661 
3662 	TestGroupCase sampCases[] =
3663 	{
3664 		{ VK_SAMPLE_COUNT_1_BIT,			"samples_1"},
3665 		{ VK_SAMPLE_COUNT_4_BIT,			"samples_4"},
3666 	};
3667 
3668 	TestGroupCase stageCases[] =
3669 	{
3670 		// compute
3671 		{ STAGE_COMPUTE,	"comp"},
3672 		// fragment
3673 		{ STAGE_FRAGMENT,	"frag"},
3674 		// vertex
3675 		{ STAGE_VERTEX,		"vert"},
3676 #ifndef CTS_USES_VULKANSC
3677 		// raygen
3678 		{ STAGE_RAYGEN,		"rgen"},
3679 #endif
3680 	};
3681 
3682 	TestGroupCase volCases[] =
3683 	{
3684 		{ 0,			"nonvolatile"},
3685 		{ 1,			"volatile"},
3686 	};
3687 
3688 	TestGroupCase unrollCases[] =
3689 	{
3690 		{ 0,			"dontunroll"},
3691 		{ 1,			"unroll"},
3692 	};
3693 
3694 	TestGroupCase tempCases[] =
3695 	{
3696 		{ 0,			"notemplate"},
3697 #ifndef CTS_USES_VULKANSC
3698 		{ 1,			"template"},
3699 #endif
3700 	};
3701 
3702 	TestGroupCase pushCases[] =
3703 	{
3704 		{ 0,			"bind"},
3705 #ifndef CTS_USES_VULKANSC
3706 		{ 1,			"push"},
3707 #endif
3708 	};
3709 
3710 	TestGroupCase fmtQualCases[] =
3711 	{
3712 		{ 0,			"no_fmt_qual"},
3713 		{ 1,			"fmt_qual"},
3714 	};
3715 
3716 	TestGroupCase readOnlyCases[] =
3717 	{
3718 		{ 0,			"readwrite"},
3719 		{ 1,			"readonly"},
3720 	};
3721 
3722 	for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3723 	{
3724 		de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name));
3725 		for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3726 		{
3727 			de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name));
3728 			for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3729 			{
3730 				de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name));
3731 
3732 				// Avoid too much duplication by excluding certain test cases
3733 				if (pipelineRobustness &&
3734 				    !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT || fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT || fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3735 				{
3736 					continue;
3737 				}
3738 
3739 				int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3740 
3741 				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3742 				{
3743 					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name));
3744 
3745 					// Avoid too much duplication by excluding certain test cases
3746 					if (unrollNdx > 0 && pipelineRobustness)
3747 						continue;
3748 
3749 					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3750 					{
3751 						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name));
3752 
3753 						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3754 						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3755 
3756 						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3757 						{
3758 							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name));
3759 
3760 							// Avoid too much duplication by excluding certain test cases
3761 							if (pipelineRobustness &&
3762 								!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3763 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3764 							{
3765 								continue;
3766 							}
3767 
3768 							for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3769 							{
3770 								de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name));
3771 
3772 								// readonly cases are just for storage_buffer
3773 								if (readOnlyCases[roNdx].count != 0 &&
3774 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3775 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3776 									continue;
3777 
3778 								if (pipelineRobustness &&
3779 									readOnlyCases[roNdx].count != 0)
3780 								{
3781 									continue;
3782 								}
3783 
3784 								for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3785 								{
3786 									de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name));
3787 
3788 									// format qualifier is only used for storage image and storage texel buffers
3789 									if (fmtQualCases[fmtQualNdx].count &&
3790 										!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3791 										continue;
3792 
3793 									if (pushCases[pushNdx].count &&
3794 										(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3795 										continue;
3796 
3797 									const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3798 									int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
3799 									TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3800 
3801 									for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3802 									{
3803 										if (lenCases[lenNdx].count != ~0U)
3804 										{
3805 											bool bufferLen = lenCases[lenNdx].count != 0;
3806 											bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3807 											if (bufferLen != bufferDesc)
3808 												continue;
3809 
3810 											// Add template tests cases only for null_descriptor cases
3811 											if (tempCases[tempNdx].count)
3812 												continue;
3813 										}
3814 
3815 										if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3816 											((lenCases[lenNdx].count % fmtSize) != 0) &&
3817 											lenCases[lenNdx].count != ~0U)
3818 										{
3819 											continue;
3820 										}
3821 
3822 										// Avoid too much duplication by excluding certain test cases
3823 										if (pipelineRobustness && robustness2 &&
3824 											(lenCases[lenNdx].count == 0 || ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
3825 										{
3826 											continue;
3827 										}
3828 
3829 										// "volatile" only applies to storage images/buffers
3830 										if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3831 											continue;
3832 
3833 
3834 										de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name));
3835 										for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3836 										{
3837 											de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name));
3838 
3839 											// Avoid too much duplication by excluding certain test cases
3840 											if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3841 											    continue;
3842 
3843 											for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3844 											{
3845 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3846 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3847 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3848 												{
3849 													// buffer descriptors don't have different dimensionalities. Only test "1D"
3850 													continue;
3851 												}
3852 
3853 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3854 													sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3855 												{
3856 													continue;
3857 												}
3858 
3859 												// Avoid too much duplication by excluding certain test cases
3860 												if (pipelineRobustness &&
3861 													!(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
3862 												{
3863 													continue;
3864 												}
3865 
3866 												de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name));
3867 												for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
3868 												{
3869 													Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
3870 													VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
3871 													VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
3872 #ifndef CTS_USES_VULKANSC
3873 													if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
3874 													{
3875 														allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_KHR;
3876 														allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
3877 
3878 														if (pipelineRobustness)
3879 															continue;
3880 													}
3881 #endif // CTS_USES_VULKANSC
3882 													if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
3883 														continue;
3884 
3885 													if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
3886 														currentStage != STAGE_VERTEX)
3887 														continue;
3888 
3889 													deUint32 imageDim[3] = {5, 11, 6};
3890 													if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
3891 														viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
3892 														imageDim[1] = imageDim[0];
3893 
3894 #ifndef CTS_USES_VULKANSC
3895 													std::vector<PipelineRobustnessCase> pipelineRobustnessCases;
3896 													if (!pipelineRobustness)
3897 														pipelineRobustnessCases.push_back(PipelineRobustnessCase::DISABLED);
3898 													else
3899 													{
3900 														pipelineRobustnessCases.push_back(PipelineRobustnessCase::ENABLED_MONOLITHIC);
3901 														if (currentStage != STAGE_RAYGEN && currentStage != STAGE_COMPUTE)
3902 														{
3903 															pipelineRobustnessCases.push_back(PipelineRobustnessCase::ENABLED_FAST_GPL);
3904 															pipelineRobustnessCases.push_back(PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL);
3905 														}
3906 													}
3907 #else
3908 													const std::vector<PipelineRobustnessCase> pipelineRobustnessCases (1u,
3909 														(pipelineRobustness ? PipelineRobustnessCase::ENABLED_MONOLITHIC : PipelineRobustnessCase::DISABLED));
3910 #endif // CTS_USES_VULKANSC
3911 
3912 													for (const auto& pipelineRobustnessCase : pipelineRobustnessCases)
3913 													{
3914 														CaseDef c =
3915 														{
3916 															(VkFormat)fmtCases[fmtNdx].count,								// VkFormat format;
3917 															currentStage,													// Stage stage;
3918 															allShaderStages,												// VkFlags allShaderStages;
3919 															allPipelineStages,												// VkFlags allPipelineStages;
3920 															(int)descCases[descNdx].count,									// VkDescriptorType descriptorType;
3921 															(VkImageViewType)viewCases[viewNdx].count,						// VkImageViewType viewType;
3922 															(VkSampleCountFlagBits)sampCases[sampNdx].count,				// VkSampleCountFlagBits samples;
3923 															(int)lenCases[lenNdx].count,									// int bufferLen;
3924 															(bool)unrollCases[unrollNdx].count,								// bool unroll;
3925 															(bool)volCases[volNdx].count,									// bool vol;
3926 															(bool)(lenCases[lenNdx].count == ~0U),							// bool nullDescriptor
3927 															(bool)tempCases[tempNdx].count,									// bool useTemplate
3928 															(bool)fmtQualCases[fmtQualNdx].count,							// bool formatQualifier
3929 															(bool)pushCases[pushNdx].count,									// bool pushDescriptor;
3930 															(bool)robustness2,												// bool testRobustness2;
3931 															pipelineRobustnessCase,											// PipelineRobustnessCase pipelineRobustnessCase;
3932 															{ imageDim[0], imageDim[1], imageDim[2] },						// deUint32 imageDim[3];
3933 															(bool)(readOnlyCases[roNdx].count == 1),						// bool readOnly;
3934 														};
3935 
3936 														const auto name = stageCases[stageNdx].name + getGPLSuffix(pipelineRobustnessCase);
3937 														viewGroup->addChild(new RobustnessExtsTestCase(testCtx, name, c));
3938 													}
3939 												}
3940 												sampGroup->addChild(viewGroup.release());
3941 											}
3942 											lenGroup->addChild(sampGroup.release());
3943 										}
3944 										fmtQualGroup->addChild(lenGroup.release());
3945 									}
3946 									// Put storage_buffer tests in separate readonly vs readwrite groups. Other types
3947 									// go directly into descGroup
3948 									if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3949 										descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3950 										rwGroup->addChild(fmtQualGroup.release());
3951 									} else {
3952 										descGroup->addChild(fmtQualGroup.release());
3953 									}
3954 								}
3955 								if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3956 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3957 									descGroup->addChild(rwGroup.release());
3958 								}
3959 							}
3960 							volGroup->addChild(descGroup.release());
3961 						}
3962 						unrollGroup->addChild(volGroup.release());
3963 					}
3964 					fmtGroup->addChild(unrollGroup.release());
3965 				}
3966 				tempGroup->addChild(fmtGroup.release());
3967 			}
3968 			pushGroup->addChild(tempGroup.release());
3969 		}
3970 		group->addChild(pushGroup.release());
3971 	}
3972 
3973 	if (robustness2)
3974 	{
3975 		de::MovePtr<tcu::TestCaseGroup> miscGroup (new tcu::TestCaseGroup(testCtx, "misc"));
3976 
3977 		for (const auto dynamicStride : { false, true })
3978 		{
3979 			const OutOfBoundsStrideParams	params		(pipelineRobustness, dynamicStride);
3980 			const std::string				nameSuffix	(dynamicStride ? "_dynamic_stride" : "");
3981 			const std::string				testName	("out_of_bounds_stride" + nameSuffix);
3982 
3983 			miscGroup->addChild(new OutOfBoundsStrideCase(testCtx, testName, params));
3984 		}
3985 
3986 		group->addChild(miscGroup.release());
3987 	}
3988 }
3989 
createRobustness2Tests(tcu::TestCaseGroup* group)3990 static void createRobustness2Tests (tcu::TestCaseGroup* group)
3991 {
3992 	createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
3993 }
3994 
createImageRobustnessTests(tcu::TestCaseGroup* group)3995 static void createImageRobustnessTests (tcu::TestCaseGroup* group)
3996 {
3997 	createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
3998 }
3999 
4000 #ifndef CTS_USES_VULKANSC
createPipelineRobustnessTests(tcu::TestCaseGroup* group)4001 static void createPipelineRobustnessTests (tcu::TestCaseGroup* group)
4002 {
4003 	tcu::TestContext& testCtx = group->getTestContext();
4004 
4005 	tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2");
4006 
4007 	createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
4008 
4009 	group->addChild(robustness2Group);
4010 
4011 	tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness");
4012 
4013 	createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
4014 
4015 	group->addChild(imageRobustness2Group);
4016 }
4017 #endif
4018 
cleanupGroup(tcu::TestCaseGroup* group)4019 static void cleanupGroup (tcu::TestCaseGroup* group)
4020 {
4021 	DE_UNREF(group);
4022 	// Destroy singleton objects.
4023 	ImageRobustnessSingleton::destroy();
4024 	Robustness2Singleton::destroy();
4025 	PipelineRobustnessImageRobustnessSingleton::destroy();
4026 	PipelineRobustnessRobustness2Singleton::destroy();
4027 }
4028 
createRobustness2Tests(tcu::TestContext& testCtx)4029 tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
4030 {
4031 	return createTestGroup(testCtx, "robustness2",
4032 							createRobustness2Tests, cleanupGroup);
4033 }
4034 
createImageRobustnessTests(tcu::TestContext& testCtx)4035 tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
4036 {
4037 	return createTestGroup(testCtx, "image_robustness",
4038 							createImageRobustnessTests, cleanupGroup);
4039 }
4040 
4041 #ifndef CTS_USES_VULKANSC
createPipelineRobustnessTests(tcu::TestContext& testCtx)4042 tcu::TestCaseGroup* createPipelineRobustnessTests (tcu::TestContext& testCtx)
4043 {
4044 	return createTestGroup(testCtx, "pipeline_robustness",
4045 							createPipelineRobustnessTests, cleanupGroup);
4046 }
4047 #endif
4048 
4049 }	// robustness
4050 }	// vkt
4051