1/*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2021 The Khronos Group Inc.
6 * Copyright (c) 2021 Valve Corporation.
7 * Copyright (c) 2023 LunarG, Inc.
8 * Copyright (c) 2023 Nintendo
9 *
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
13 *
14 *      http://www.apache.org/licenses/LICENSE-2.0
15 *
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
21 *
22 *//*!
23 * \file
 * \brief Dynamic state tests that mix state-setting commands with compute and transfer operations.
25 *//*--------------------------------------------------------------------*/
26#include "vktDynamicStateComputeTests.hpp"
27#include "vktCustomInstancesDevices.hpp"
28
29#include "vkBufferWithMemory.hpp"
30#include "vkObjUtil.hpp"
31#include "vkCmdUtil.hpp"
32#include "vkBarrierUtil.hpp"
33#include "vkBuilderUtil.hpp"
34#include "vkTypeUtil.hpp"
35
36#include "tcuCommandLine.hpp"
37#include "tcuVector.hpp"
38
39#include <vector>
40#include <string>
41#include <functional>
42#include <map>
43#include <sstream>
44#include <cstring>
45#include <iterator>
46#include <numeric>
47#include <memory>
48
49namespace vkt
50{
51namespace DynamicState
52{
53
54namespace
55{
56
57using namespace vk;
58
// Auxiliary objects needed to set a given dynamic state that must outlive the state-setting call. Empty by default.
60struct DynamicStateData
61{
62	virtual ~DynamicStateData() {}
63};
64
65// A vertex buffer and graphics pipeline are needed for vkCmdBindVertexBuffers2EXT().
66struct BindVertexBuffersData : public DynamicStateData
67{
68private:
69	using BufferPtr			= de::MovePtr<BufferWithMemory>;
70	using RenderPassPtr		= RenderPassWrapper;
71	using LayoutPtr			= Move<VkPipelineLayout>;
72	using ModulePtr			= Move<VkShaderModule>;
73	using PipelinePtr		= Move<VkPipeline>;
74
75	static constexpr deUint32 kWidth	= 16u;
76	static constexpr deUint32 kHeight	= 16u;
77
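	// Extent used for the static viewports and scissors of the stand-in graphics pipeline.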
78	VkExtent3D getExtent (void)
79	{
80		return makeExtent3D(kWidth, kHeight, 1u);
81	}
82
83public:
84	BindVertexBuffersData(Context& ctx, VkDevice device, PipelineConstructionType pipelineConstructionType)
85		: m_vertexBuffer		()
86		, m_dataSize			(0u)
87		, m_vertexBufferSize	(0ull)
88		, m_renderPass			()
89		, m_pipelineLayout		()
90		, m_vertexShader		()
91		, m_graphicsPipeline	()
92	{
93		const auto&	vki			= ctx.getInstanceInterface();
94		const auto	phyDev		= ctx.getPhysicalDevice();
95		const auto&	vkd			= ctx.getDeviceInterface();
96		auto&		alloc		= ctx.getDefaultAllocator();
97
98		// Vertex buffer.
99		tcu::Vec4	vertex		(0.f, 0.f, 0.f, 1.f);
100		m_dataSize				= sizeof(vertex);
101		m_vertexBufferSize		= de::roundUp(static_cast<VkDeviceSize>(m_dataSize), getPhysicalDeviceProperties(vki, phyDev).limits.nonCoherentAtomSize);
102		const auto	bufferInfo	= makeBufferCreateInfo(m_vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
103
104		m_vertexBuffer			= BufferPtr(new BufferWithMemory(vkd, device, alloc, bufferInfo, MemoryRequirement::HostVisible));
105		auto&	bufferAlloc		= m_vertexBuffer->getAllocation();
106
107		deMemcpy(bufferAlloc.getHostPtr(), &vertex, m_dataSize);
108		flushAlloc(vkd, device, bufferAlloc);
109
110		// Empty render pass.
111		m_renderPass = RenderPassWrapper(pipelineConstructionType, vkd, device);
112
113		// Empty pipeline layout.
114		m_pipelineLayout = makePipelineLayout(vkd, device);
115
116		// Passthrough vertex shader.
117		m_vertexShader = createShaderModule(vkd, device, ctx.getBinaryCollection().get("vert"), 0u);
118
119		const auto						extent		= getExtent();
120		const std::vector<VkViewport>	viewports	(1, makeViewport(extent));
121		const std::vector<VkRect2D>		scissors	(1, makeRect2D(extent));
122		const VkDynamicState			state		= VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT;
123
124		const VkPipelineDynamicStateCreateInfo dynamicStateInfo =
125		{
126			VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,	//	VkStructureType						sType;
127			nullptr,												//	const void*							pNext;
128			0u,														//	VkPipelineDynamicStateCreateFlags	flags;
129			1u,														//	deUint32							dynamicStateCount;
130			&state,													//	const VkDynamicState*				pDynamicStates;
131		};
132
133		// Graphics pipeline.
134		m_graphicsPipeline = makeGraphicsPipeline(vkd, device, m_pipelineLayout.get(),
135			m_vertexShader.get(), DE_NULL, DE_NULL, DE_NULL, DE_NULL,
136			m_renderPass.get(), viewports, scissors, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, 0u,
137			nullptr, nullptr, nullptr, nullptr, nullptr, &dynamicStateInfo);
138	}
139
140	const BufferWithMemory*	getVertexBuffer () const
141	{
142		return m_vertexBuffer.get();
143	}
144
145	size_t getDataSize () const
146	{
147		return m_dataSize;
148	}
149
150	VkPipeline getPipeline () const
151	{
152		return m_graphicsPipeline.get();
153	}
154
155	virtual ~BindVertexBuffersData() {}
156
157private:
158	BufferPtr		m_vertexBuffer;
159	size_t			m_dataSize;
160	VkDeviceSize	m_vertexBufferSize;
161	RenderPassPtr	m_renderPass;
162	LayoutPtr		m_pipelineLayout;
163	ModulePtr		m_vertexShader;
164	PipelinePtr		m_graphicsPipeline;
165};
166
167// Function that records a state-setting command in the given command buffer.
168using RecordStateFunction = std::function<void(const DeviceInterface*, VkCommandBuffer, const DynamicStateData*)>;
169
// State-setting functions. Each one records its state with arbitrary but valid values; the values are never consumed because no draw command is recorded in these tests.
171void setViewport (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
172{
173	const VkViewport viewport =
174	{
175		0.0f,	//	float	x;
176		0.0f,	//	float	y;
177		1.0f,	//	float	width;
178		1.0f,	//	float	height;
179		0.0f,	//	float	minDepth;
180		1.0f,	//	float	maxDepth;
181	};
182	vkd->cmdSetViewport(cmdBuffer, 0u, 1u, &viewport);
183}
184
185void setScissor (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
186{
187	const VkRect2D scissor =
188	{
189		{ 0, 0 },	//	VkOffset2D	offset;
190		{ 1u, 1u },	//	VkExtent2D	extent;
191	};
192	vkd->cmdSetScissor(cmdBuffer, 0u, 1u, &scissor);
193}
194
195void setLineWidth (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
196{
197	vkd->cmdSetLineWidth(cmdBuffer, 1.0f);
198}
199
200void setDepthBias (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
201{
202	vkd->cmdSetDepthBias(cmdBuffer, 0.0f, 0.0f, 0.0f);
203}
204
205void setBlendConstants (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
206{
207	const float blendConstants[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
208	vkd->cmdSetBlendConstants(cmdBuffer, blendConstants);
209}
210
211void setDepthBounds (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
212{
213	vkd->cmdSetDepthBounds(cmdBuffer, 0.0f, 1.0f);
214}
215
216void setStencilCompareMask (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
217{
218	vkd->cmdSetStencilCompareMask(cmdBuffer, VK_STENCIL_FACE_FRONT_AND_BACK, 0xFFu);
219}
220
221void setStencilWriteMask (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
222{
223	vkd->cmdSetStencilWriteMask(cmdBuffer, VK_STENCIL_FACE_FRONT_AND_BACK, 0xFFu);
224}
225
226void setStencilReference (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
227{
228	vkd->cmdSetStencilReference(cmdBuffer, VK_STENCIL_FACE_FRONT_AND_BACK, 0xFFu);
229}
230
231void setDiscardRectangle (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
232{
233	const VkRect2D rectangle =
234	{
235		{ 0, 0 },	//	VkOffset2D	offset;
236		{ 1u, 1u },	//	VkExtent2D	extent;
237	};
238	vkd->cmdSetDiscardRectangleEXT(cmdBuffer, 0u, 1u, &rectangle);
239}
240
241void setSampleLocations (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
242{
243	const VkSampleLocationEXT locations[] =
244	{
245		{ 0.5f, 0.5f },
246		{ 0.5f, 1.5f },
247		{ 1.5f, 0.5f },
248		{ 1.5f, 1.5f },
249	};
250	const VkSampleLocationsInfoEXT info =
251	{
252		VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,	//	VkStructureType				sType;
253		nullptr,										//	const void*					pNext;
254		VK_SAMPLE_COUNT_4_BIT,							//	VkSampleCountFlagBits		sampleLocationsPerPixel;
255		{ 1u, 1u },										//	VkExtent2D					sampleLocationGridSize;
256		4u,												//	deUint32					sampleLocationsCount;
257		locations,										//	const VkSampleLocationEXT*	pSampleLocations;
258	};
259	vkd->cmdSetSampleLocationsEXT(cmdBuffer, &info);
260}
261
262#ifndef CTS_USES_VULKANSC
void setRTPipelineStackSize (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
264{
265	vkd->cmdSetRayTracingPipelineStackSizeKHR(cmdBuffer, 4096u);
266}
267#endif // CTS_USES_VULKANSC
268
void setFragmentShadingRate (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
270{
271	const VkExtent2D							fragmentSize	= { 1u, 1u };
272	const VkFragmentShadingRateCombinerOpKHR	combinerOps[2]	=
273	{
274		VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
275		VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
276	};
277	vkd->cmdSetFragmentShadingRateKHR(cmdBuffer, &fragmentSize, combinerOps);
278}
279
280void setLineStipple (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
281{
282	vkd->cmdSetLineStippleEXT(cmdBuffer, 1u, 1u);
283}
284
285void setCullMode (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
286{
287#ifndef CTS_USES_VULKANSC
288	vkd->cmdSetCullMode(cmdBuffer, VK_CULL_MODE_FRONT_AND_BACK);
289#else
290	vkd->cmdSetCullModeEXT(cmdBuffer, VK_CULL_MODE_FRONT_AND_BACK);
291#endif // CTS_USES_VULKANSC
292}
293
294void setFrontFace (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
295{
296#ifndef CTS_USES_VULKANSC
297	vkd->cmdSetFrontFace(cmdBuffer, VK_FRONT_FACE_COUNTER_CLOCKWISE);
298#else
299	vkd->cmdSetFrontFaceEXT(cmdBuffer, VK_FRONT_FACE_COUNTER_CLOCKWISE);
300#endif // CTS_USES_VULKANSC
301}
302
303void setPrimitiveTopology (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
304{
305#ifndef CTS_USES_VULKANSC
306	vkd->cmdSetPrimitiveTopology(cmdBuffer, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
307#else
308	vkd->cmdSetPrimitiveTopologyEXT(cmdBuffer, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
309#endif // CTS_USES_VULKANSC
310}
311
312void setViewportWithCount (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
313{
314	const VkViewport viewport =
315	{
316		0.0f,	//	float	x;
317		0.0f,	//	float	y;
318		1.0f,	//	float	width;
319		1.0f,	//	float	height;
320		0.0f,	//	float	minDepth;
321		1.0f,	//	float	maxDepth;
322	};
323#ifndef CTS_USES_VULKANSC
324	vkd->cmdSetViewportWithCount(cmdBuffer, 1u, &viewport);
325#else
326	vkd->cmdSetViewportWithCountEXT(cmdBuffer, 1u, &viewport);
327#endif // CTS_USES_VULKANSC
328}
329
330void setScissorWithCount (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
331{
332	const VkRect2D scissor =
333	{
334		{ 0, 0 },	//	VkOffset2D	offset;
335		{ 1u, 1u },	//	VkExtent2D	extent;
336	};
337#ifndef CTS_USES_VULKANSC
338	vkd->cmdSetScissorWithCount(cmdBuffer, 1u, &scissor);
339#else
340	vkd->cmdSetScissorWithCountEXT(cmdBuffer, 1u, &scissor);
341#endif // CTS_USES_VULKANSC
342}
343
344void bindVertexBuffers (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData* data)
345{
346	const auto bindData			= dynamic_cast<const BindVertexBuffersData*>(data);
347	DE_ASSERT(bindData != nullptr);
348	const auto vertexBuffer		= bindData->getVertexBuffer();
349	const auto dataSize			= static_cast<VkDeviceSize>(bindData->getDataSize());
350	const auto bufferOffset		= vertexBuffer->getAllocation().getOffset();
351	const auto stride			= static_cast<VkDeviceSize>(0);
352	const auto pipeline			= bindData->getPipeline();
353
354	vkd->cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
355#ifndef CTS_USES_VULKANSC
356	vkd->cmdBindVertexBuffers2(cmdBuffer, 0u, 1u, &vertexBuffer->get(), &bufferOffset, &dataSize, &stride);
357#else
358	vkd->cmdBindVertexBuffers2EXT(cmdBuffer, 0u, 1u, &vertexBuffer->get(), &bufferOffset, &dataSize, &stride);
359#endif // CTS_USES_VULKANSC
360}
361
362void setDepthTestEnable (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
363{
364#ifndef CTS_USES_VULKANSC
365	vkd->cmdSetDepthTestEnable(cmdBuffer, VK_TRUE);
366#else
367	vkd->cmdSetDepthTestEnableEXT(cmdBuffer, VK_TRUE);
368#endif // CTS_USES_VULKANSC
369}
370
371void setDepthWriteEnable (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
372{
373#ifndef CTS_USES_VULKANSC
374	vkd->cmdSetDepthWriteEnable(cmdBuffer, VK_TRUE);
375#else
376	vkd->cmdSetDepthWriteEnableEXT(cmdBuffer, VK_TRUE);
377#endif // CTS_USES_VULKANSC
378}
379
380void setDepthCompareOp (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
381{
382#ifndef CTS_USES_VULKANSC
383	vkd->cmdSetDepthCompareOp(cmdBuffer, VK_COMPARE_OP_LESS);
384#else
385	vkd->cmdSetDepthCompareOpEXT(cmdBuffer, VK_COMPARE_OP_LESS);
386#endif // CTS_USES_VULKANSC
387}
388
389void setDepthBoundsTestEnable (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
390{
391#ifndef CTS_USES_VULKANSC
392	vkd->cmdSetDepthBoundsTestEnable(cmdBuffer, VK_TRUE);
393#else
394	vkd->cmdSetDepthBoundsTestEnableEXT(cmdBuffer, VK_TRUE);
395#endif // CTS_USES_VULKANSC
396}
397
398void setStencilTestEnable (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
399{
400#ifndef CTS_USES_VULKANSC
401	vkd->cmdSetStencilTestEnable(cmdBuffer, VK_TRUE);
402#else
403	vkd->cmdSetStencilTestEnableEXT(cmdBuffer, VK_TRUE);
404#endif // CTS_USES_VULKANSC
405}
406
407void setStencilOp (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
408{
409#ifndef CTS_USES_VULKANSC
410	vkd->cmdSetStencilOp(cmdBuffer, VK_STENCIL_FACE_FRONT_AND_BACK, VK_STENCIL_OP_ZERO, VK_STENCIL_OP_INCREMENT_AND_CLAMP, VK_STENCIL_OP_KEEP, VK_COMPARE_OP_ALWAYS);
411#else
412	vkd->cmdSetStencilOpEXT(cmdBuffer, VK_STENCIL_FACE_FRONT_AND_BACK, VK_STENCIL_OP_ZERO, VK_STENCIL_OP_INCREMENT_AND_CLAMP, VK_STENCIL_OP_KEEP, VK_COMPARE_OP_ALWAYS);
413#endif // CTS_USES_VULKANSC
414}
415
416#ifndef CTS_USES_VULKANSC
417
418void setViewportWScaling (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
419{
420	const VkViewportWScalingNV viewport =
421	{
422		1.0f,	//	float	xcoeff;
423		1.0f,	//	float	ycoeff;
424	};
425	vkd->cmdSetViewportWScalingNV(cmdBuffer, 0u, 1u, &viewport);
426}
427
428void setViewportShadingRatePalette (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
429{
430	const VkShadingRatePaletteEntryNV	entry	= VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV;
431	const VkShadingRatePaletteNV		palette	=
432	{
433		1u,		//	deUint32							shadingRatePaletteEntryCount;
434		&entry,	//	const VkShadingRatePaletteEntryNV*	pShadingRatePaletteEntries;
435	};
436	vkd->cmdSetViewportShadingRatePaletteNV(cmdBuffer, 0u, 1u, &palette);
437}
438
439void setCoarseSamplingOrder (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
440{
441	const VkCoarseSampleLocationNV locations[2] =
442	{
443		{
444			0u,	//	deUint32	pixelX;
445			0u,	//	deUint32	pixelY;
446			0u,	//	deUint32	sample;
447		},
448		{
449			0u,	//	deUint32	pixelX;
450			1u,	//	deUint32	pixelY;
451			0u,	//	deUint32	sample;
452		},
453	};
454	const VkCoarseSampleOrderCustomNV order =
455	{
456		VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV,	//	VkShadingRatePaletteEntryNV		shadingRate;
457		1u,																//	deUint32						sampleCount;
458		2u,																//	deUint32						sampleLocationCount;
459		locations														//	const VkCoarseSampleLocationNV*	pSampleLocations;
460	};
461	vkd->cmdSetCoarseSampleOrderNV(cmdBuffer, VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, 1u, &order);
462}
463
464void setExclusiveScissor (const DeviceInterface* vkd, VkCommandBuffer cmdBuffer, const DynamicStateData*)
465{
466	const VkRect2D scissor =
467	{
468		{ 0, 0 },	//	VkOffset2D	offset;
469		{ 1u, 1u },	//	VkExtent2D	extent;
470	};
471	vkd->cmdSetExclusiveScissorNV(cmdBuffer, 0u, 1u, &scissor);
472}
473
474#endif // CTS_USES_VULKANSC
475
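// Complete list of dynamic states exercised by these tests.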
476const VkDynamicState dynamicStateList[] =
477{
478	VK_DYNAMIC_STATE_VIEWPORT,
479	VK_DYNAMIC_STATE_SCISSOR,
480	VK_DYNAMIC_STATE_LINE_WIDTH,
481	VK_DYNAMIC_STATE_DEPTH_BIAS,
482	VK_DYNAMIC_STATE_BLEND_CONSTANTS,
483	VK_DYNAMIC_STATE_DEPTH_BOUNDS,
484	VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
485	VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
486	VK_DYNAMIC_STATE_STENCIL_REFERENCE,
487	VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT,
488	VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT,
489#ifndef CTS_USES_VULKANSC
490	VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR,
491#endif // CTS_USES_VULKANSC
492	VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR,
493	VK_DYNAMIC_STATE_LINE_STIPPLE_EXT,
494	VK_DYNAMIC_STATE_CULL_MODE_EXT,
495	VK_DYNAMIC_STATE_FRONT_FACE_EXT,
496	VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT,
497	VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,
498	VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT,
499	VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
500	VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
501	VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
502	VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
503	VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,
504	VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
505	VK_DYNAMIC_STATE_STENCIL_OP_EXT,
506#ifndef CTS_USES_VULKANSC
507	VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV,
508	VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV,
509	VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV,
510	VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV,
511#endif // CTS_USES_VULKANSC
512};
513
514// Information about a dynamic state.
515struct StateInfo
516{
517	std::vector<std::string>	requirements;	// List of required functionalities.
518	RecordStateFunction			recorder;		// Function that records the state to the command buffer being used.
519};
520
521// Returns the state info for a given dynamic state.
522const StateInfo& getDynamicStateInfo (VkDynamicState state)
523{
524	// Maps a given state to its state info structure.
525	using StateInfoMap = std::map<VkDynamicState, StateInfo>;
526
527	static const StateInfoMap result =
528	{
529		{	VK_DYNAMIC_STATE_VIEWPORT,								{	{},										setViewport						}	},
530		{	VK_DYNAMIC_STATE_SCISSOR,								{	{},										setScissor						}	},
531		{	VK_DYNAMIC_STATE_LINE_WIDTH,							{	{},										setLineWidth					}	},
532		{	VK_DYNAMIC_STATE_DEPTH_BIAS,							{	{},										setDepthBias					}	},
533		{	VK_DYNAMIC_STATE_BLEND_CONSTANTS,						{	{},										setBlendConstants				}	},
534		{	VK_DYNAMIC_STATE_DEPTH_BOUNDS,							{	{},										setDepthBounds					}	},
535		{	VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,					{	{},										setStencilCompareMask			}	},
536		{	VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,					{	{},										setStencilWriteMask				}	},
537		{	VK_DYNAMIC_STATE_STENCIL_REFERENCE,						{	{},										setStencilReference				}	},
538		{	VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT,					{	{ "VK_EXT_discard_rectangles" },		setDiscardRectangle				}	},
539		{	VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT,					{	{ "VK_EXT_sample_locations" },			setSampleLocations				}	},
540#ifndef CTS_USES_VULKANSC
		{	VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR,	{	{ "VK_KHR_ray_tracing_pipeline" },		setRTPipelineStackSize			}	},
542#endif // CTS_USES_VULKANSC
		{	VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR,				{	{ "VK_KHR_fragment_shading_rate" },		setFragmentShadingRate			}	},
544		{	VK_DYNAMIC_STATE_LINE_STIPPLE_EXT,						{	{ "VK_EXT_line_rasterization" },		setLineStipple					}	},
545		{	VK_DYNAMIC_STATE_CULL_MODE_EXT,							{	{ "VK_EXT_extended_dynamic_state" },	setCullMode						}	},
546		{	VK_DYNAMIC_STATE_FRONT_FACE_EXT,						{	{ "VK_EXT_extended_dynamic_state" },	setFrontFace					}	},
547		{	VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT,				{	{ "VK_EXT_extended_dynamic_state" },	setPrimitiveTopology			}	},
548		{	VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,				{	{ "VK_EXT_extended_dynamic_state" },	setViewportWithCount			}	},
549		{	VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT,				{	{ "VK_EXT_extended_dynamic_state" },	setScissorWithCount				}	},
550		{	VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,		{	{ "VK_EXT_extended_dynamic_state" },	bindVertexBuffers				}	},
551		{	VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,					{	{ "VK_EXT_extended_dynamic_state" },	setDepthTestEnable				}	},
552		{	VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,				{	{ "VK_EXT_extended_dynamic_state" },	setDepthWriteEnable				}	},
553		{	VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,					{	{ "VK_EXT_extended_dynamic_state" },	setDepthCompareOp				}	},
554		{	VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,			{	{ "VK_EXT_extended_dynamic_state" },	setDepthBoundsTestEnable		}	},
555		{	VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,				{	{ "VK_EXT_extended_dynamic_state" },	setStencilTestEnable			}	},
556		{	VK_DYNAMIC_STATE_STENCIL_OP_EXT,						{	{ "VK_EXT_extended_dynamic_state" },	setStencilOp					}	},
557#ifndef CTS_USES_VULKANSC
558		{	VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV,					{	{ "VK_NV_clip_space_w_scaling" },		setViewportWScaling				}	},
559		{	VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV,		{	{ "VK_NV_shading_rate_image"},			setViewportShadingRatePalette	}	},
560		{	VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV,		{	{ "VK_NV_shading_rate_image"},			setCoarseSamplingOrder			}	},
561		{	VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV,					{	{ "VK_NV_scissor_exclusive"},			setExclusiveScissor				}	},
562#endif // CTS_USES_VULKANSC
563	};
564
565	const auto itr = result.find(state);
566	DE_ASSERT(itr != result.end());
567
568	return itr->second;
569}
570
// Device helper: abstracts the device, queue and allocator so a test can run either on the context's default device or on a custom one.
572class DeviceHelper
573{
574public:
575	virtual ~DeviceHelper () {}
576	virtual const DeviceInterface&			getDeviceInterface	(void) const = 0;
577	virtual VkDevice						getDevice			(void) const = 0;
578	virtual uint32_t						getQueueFamilyIndex	(void) const = 0;
579	virtual VkQueue							getQueue			(void) const = 0;
580	virtual Allocator&						getAllocator		(void) const = 0;
581	virtual const std::vector<std::string>&	getDeviceExtensions	(void) const = 0;
582};
583
584// This one just reuses the default device from the context.
585class ContextDeviceHelper : public DeviceHelper
586{
587public:
588	ContextDeviceHelper (Context& context)
589		: m_deviceInterface		(context.getDeviceInterface())
590		, m_device				(context.getDevice())
591		, m_queueFamilyIndex	(context.getUniversalQueueFamilyIndex())
592		, m_queue				(context.getUniversalQueue())
593		, m_allocator			(context.getDefaultAllocator())
594		, m_extensions			(context.getDeviceExtensions())
595		{}
596
597	virtual ~ContextDeviceHelper () {}
598
599	const DeviceInterface&			getDeviceInterface	(void) const override	{ return m_deviceInterface;		}
600	VkDevice						getDevice			(void) const override	{ return m_device;				}
601	uint32_t						getQueueFamilyIndex	(void) const override	{ return m_queueFamilyIndex;	}
602	VkQueue							getQueue			(void) const override	{ return m_queue;				}
603	Allocator&						getAllocator		(void) const override	{ return m_allocator;			}
604	const std::vector<std::string>&	getDeviceExtensions	(void) const override	{ return m_extensions;			}
605
606protected:
607	const DeviceInterface&		m_deviceInterface;
608	const VkDevice				m_device;
609	const uint32_t				m_queueFamilyIndex;
610	const VkQueue				m_queue;
611	Allocator&					m_allocator;
612	std::vector<std::string>	m_extensions;
613};
614
615// This one creates a new device with VK_NV_shading_rate_image.
616class ShadingRateImageDeviceHelper : public DeviceHelper
617{
618public:
619	ShadingRateImageDeviceHelper (Context& context)
620	{
621		const auto&	vkp				= context.getPlatformInterface();
622		const auto&	vki				= context.getInstanceInterface();
623		const auto	instance		= context.getInstance();
624		const auto	physicalDevice	= context.getPhysicalDevice();
625		const auto	queuePriority	= 1.0f;
626
627		// Queue index first.
628		m_queueFamilyIndex = context.getUniversalQueueFamilyIndex();
629
630		// Create a universal queue that supports graphics and compute.
631		const VkDeviceQueueCreateInfo queueParams =
632		{
633			VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,	// VkStructureType				sType;
634			DE_NULL,									// const void*					pNext;
635			0u,											// VkDeviceQueueCreateFlags		flags;
636			m_queueFamilyIndex,							// deUint32						queueFamilyIndex;
637			1u,											// deUint32						queueCount;
638			&queuePriority								// const float*					pQueuePriorities;
639		};
640
641		const char* extensions[] =
642		{
643			"VK_NV_shading_rate_image",
644		};
645		m_extensions.push_back("VK_NV_shading_rate_image");
646
647#ifndef CTS_USES_VULKANSC
648		VkPhysicalDeviceShadingRateImageFeaturesNV	shadingRateImageFeatures	= initVulkanStructure();
649		VkPhysicalDeviceFeatures2					features2					= initVulkanStructure(&shadingRateImageFeatures);
650
651		vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
652#endif // CTS_USES_VULKANSC
653
		const VkDeviceCreateInfo deviceCreateInfo =
		{
			VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,					//	VkStructureType					sType;
#ifndef CTS_USES_VULKANSC
			&features2,												//	const void*						pNext;
#else
			DE_NULL,												//	const void*						pNext;
#endif // CTS_USES_VULKANSC
			0u,														//	VkDeviceCreateFlags				flags;
			1u,														//	deUint32						queueCreateInfoCount;
			&queueParams,											//	const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
			0u,														//	deUint32						enabledLayerCount;
			nullptr,												//	const char* const*				ppEnabledLayerNames;
			static_cast<uint32_t>(de::arrayLength(extensions)),		//	deUint32						enabledExtensionCount;
			extensions,												//	const char* const*				ppEnabledExtensionNames;
			nullptr,												//	const VkPhysicalDeviceFeatures*	pEnabledFeatures;
		};
671
672		m_device	= createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), vkp, instance, vki, physicalDevice, &deviceCreateInfo);
673		m_vkd		.reset(new DeviceDriver(vkp, instance, m_device.get(), context.getUsedApiVersion()));
674		m_queue		= getDeviceQueue(*m_vkd, *m_device, m_queueFamilyIndex, 0u);
675		m_allocator	.reset(new SimpleAllocator(*m_vkd, m_device.get(), getPhysicalDeviceMemoryProperties(vki, physicalDevice)));
676	}
677
678	virtual ~ShadingRateImageDeviceHelper () {}
679
680	const DeviceInterface&			getDeviceInterface	(void) const override	{ return *m_vkd;				}
681	VkDevice						getDevice			(void) const override	{ return m_device.get();		}
682	uint32_t						getQueueFamilyIndex	(void) const override	{ return m_queueFamilyIndex;	}
683	VkQueue							getQueue			(void) const override	{ return m_queue;				}
684	Allocator&						getAllocator		(void) const override	{ return *m_allocator;			}
685	const std::vector<std::string>&	getDeviceExtensions	(void) const override	{ return m_extensions;			}
686
687protected:
688	Move<VkDevice>						m_device;
689	std::unique_ptr<DeviceDriver>		m_vkd;
690	deUint32							m_queueFamilyIndex;
691	VkQueue								m_queue;
692	std::unique_ptr<SimpleAllocator>	m_allocator;
693	std::vector<std::string>			m_extensions;
694};
695
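// Device helpers are created lazily, shared across test instances and released in cleanupDevice().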
696std::unique_ptr<DeviceHelper> g_shadingRateDeviceHelper;
697std::unique_ptr<DeviceHelper> g_contextDeviceHelper;
698
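// Returns the device helper needed for the given dynamic state: a custom device with VK_NV_shading_rate_image when the state requires that extension, the context's default device otherwise.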
699DeviceHelper& getDeviceHelper(Context& context, VkDynamicState dynamicState)
700{
701	const auto& stateInfo = getDynamicStateInfo(dynamicState);
702
703	if (de::contains(stateInfo.requirements.begin(), stateInfo.requirements.end(), "VK_NV_shading_rate_image"))
704	{
705		if (!g_shadingRateDeviceHelper)
706			g_shadingRateDeviceHelper.reset(new ShadingRateImageDeviceHelper(context));
707		return *g_shadingRateDeviceHelper;
708	}
709
710	if (!g_contextDeviceHelper)
711		g_contextDeviceHelper.reset(new ContextDeviceHelper(context));
712	return *g_contextDeviceHelper;
713}
714
715// Returns the set of auxiliary data needed to set a given state.
716de::MovePtr<DynamicStateData> getDynamicStateData (Context& ctx, VkDevice device, VkDynamicState state, PipelineConstructionType pipelineConstructionType)
717{
718	// Create vertex buffer for VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT.
719	if (state == VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)
720		return de::MovePtr<DynamicStateData>(new BindVertexBuffersData(ctx, device, pipelineConstructionType));
721
	// No auxiliary data is needed for the remaining states.
723	return de::MovePtr<DynamicStateData>();
724}
725
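// Type of operation the dynamic state is mixed with, and whether the state is set before or after that operation.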
726enum class OperType		{ COMPUTE = 0,	TRANSFER	};
727enum class WhenToSet	{ BEFORE = 0,	AFTER		};
728
729// Set dynamic state before or after attempting to run a compute or transfer operation.
730struct TestParams
731{
732	OperType					operationType;
733	WhenToSet					whenToSet;
734	std::vector<VkDynamicState>	states;
735};
736
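// Test case that records dynamic-state-setting commands around compute dispatches or transfer operations and checks that the results are unaffected.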
737class DynamicStateComputeCase : public vkt::TestCase
738{
739public:
740
741							DynamicStateComputeCase		(tcu::TestContext& testCtx, const std::string& name, const TestParams& params, PipelineConstructionType pipelineConstructionType);
742	virtual					~DynamicStateComputeCase	(void) {}
743
744	virtual void			checkSupport				(Context& context) const;
745	virtual void			initPrograms				(vk::SourceCollections& programCollection) const;
746	virtual TestInstance*	createInstance				(Context& context) const;
747
748protected:
749	TestParams					m_params;
750	PipelineConstructionType	m_pipelineConstructionType;
751};
752
753class DynamicStateComputeInstance : public vkt::TestInstance
754{
755public:
756								DynamicStateComputeInstance		(Context& context, const TestParams& params, PipelineConstructionType pipelineConstructionType);
757	virtual						~DynamicStateComputeInstance	(void) {}
758
759	virtual tcu::TestStatus		iterate							(void);
760
761protected:
762	tcu::TestStatus				iterateTransfer					(void);
763	tcu::TestStatus				iterateCompute					(void);
764
765	TestParams					m_params;
766	PipelineConstructionType	m_pipelineConstructionType;
767};
768
769DynamicStateComputeCase::DynamicStateComputeCase(tcu::TestContext& testCtx, const std::string& name, const TestParams& params, PipelineConstructionType pipelineConstructionType)
770	: vkt::TestCase					(testCtx, name)
771	, m_params						(params)
772	, m_pipelineConstructionType	(pipelineConstructionType)
773{}
774
775DynamicStateComputeInstance::DynamicStateComputeInstance (Context& context, const TestParams& params, PipelineConstructionType pipelineConstructionType)
776	: vkt::TestInstance				(context)
777	, m_params						(params)
778	, m_pipelineConstructionType	(pipelineConstructionType)
779{}
780
781void DynamicStateComputeCase::checkSupport (Context& context) const
782{
783	checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), m_pipelineConstructionType);
784
785	// Check required functionalities.
786	for (const auto& state : m_params.states)
787	{
788		const auto stateInfo = getDynamicStateInfo(state);
789		for (const auto& functionality : stateInfo.requirements)
790			context.requireDeviceFunctionality(functionality);
791	}
792}
793
794void DynamicStateComputeCase::initPrograms (vk::SourceCollections& programCollection) const
795{
796	if (m_params.operationType == OperType::COMPUTE)
797	{
798		std::ostringstream comp;
799		comp
800			<< "#version 450\n"
801			<< "\n"
802			<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
803			<< "\n"
804			<< "layout (push_constant, std430) uniform PushConstants {\n"
805			<< "	uint valueIndex;\n"
806			<< "} pc;\n"
807			<< "\n"
808			<< "layout (set=0, binding=0, std430) buffer OutputBlock {\n"
809			<< "	uint value[];\n"
810			<< "} ob;\n"
811			<< "\n"
812			<< "void main ()\n"
813			<< "{\n"
814			<< "	ob.value[pc.valueIndex] = 1u;\n"
815			<< "}\n"
816			;
817
818		programCollection.glslSources.add("comp") << glu::ComputeSource(comp.str());
819	}
820
821	if (de::contains(begin(m_params.states), end(m_params.states), VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT))
822	{
823		// Passthrough vertex shader for stand-in graphics pipeline.
824		std::ostringstream vert;
825		vert
826			<< "#version 450\n"
827			<< "layout (location=0) in vec4 inVertex;\n"
828			<< "void main() {\n"
829			<< "    gl_Position = inVertex;\n"
830			<< "}\n"
831			;
832
833		programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
834	}
835}
836
837vkt::TestInstance* DynamicStateComputeCase::createInstance (Context& context) const
838{
839	return new DynamicStateComputeInstance(context, m_params, m_pipelineConstructionType);
840}
841
842tcu::TestStatus DynamicStateComputeInstance::iterate (void)
843{
844	if (m_params.operationType == OperType::COMPUTE)
845		return iterateCompute();
846	else
847		return iterateTransfer();
848}
849
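// Copies the given values into a host-visible buffer and flushes the allocation so the device sees them.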
850void fillBuffer(const DeviceInterface& vkd, VkDevice device, BufferWithMemory& buffer, const std::vector<deUint32> &values)
851{
852	auto& alloc = buffer.getAllocation();
853
854	deMemcpy(alloc.getHostPtr(), values.data(), de::dataSize(values));
855	flushAlloc(vkd, device, alloc);
856}
857
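// Records one buffer-to-buffer copy per dynamic state, setting the state before or after each copy, then verifies the copied data.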
858tcu::TestStatus DynamicStateComputeInstance::iterateTransfer (void)
859{
860	const auto&	vki			= m_context.getInstanceInterface();
861	const auto	phyDev		= m_context.getPhysicalDevice();
862	auto&		devHelper	= getDeviceHelper(m_context, m_params.states.at(0));
863	const auto&	vkd			= devHelper.getDeviceInterface();
864	const auto	device		= devHelper.getDevice();
865	const auto	qIndex		= devHelper.getQueueFamilyIndex();
866	const auto	queue		= devHelper.getQueue();
867	auto&		alloc		= devHelper.getAllocator();
868
869	const auto	cmdPool			= makeCommandPool(vkd, device, qIndex);
870	const auto	cmdBufferPtr	= allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
871	const auto	cmdBuffer		= cmdBufferPtr.get();
872
873	// Prepare two host-visible buffers for a transfer operation, with one element per dynamic state.
874	const deUint32		seqStart	= 1611747605u;
875
876	DE_ASSERT(!m_params.states.empty());
877	std::vector<deUint32>		srcValues(m_params.states.size());
878	const decltype(srcValues)	dstValues(srcValues.size(), 0u);
879	std::iota(begin(srcValues), end(srcValues), seqStart);
880
881	const auto			elemSize	= static_cast<VkDeviceSize>(sizeof(decltype(srcValues)::value_type));
882	const auto			dataSize	= static_cast<VkDeviceSize>(de::dataSize(srcValues));
883	const auto			bufferSize	= de::roundUp(dataSize, getPhysicalDeviceProperties(vki, phyDev).limits.nonCoherentAtomSize);
884	const auto			srcInfo		= makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
885	const auto			dstInfo		= makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
886	BufferWithMemory	srcBuffer	(vkd, device, alloc, srcInfo, MemoryRequirement::HostVisible);
887	BufferWithMemory	dstBuffer	(vkd, device, alloc, dstInfo, MemoryRequirement::HostVisible);
888
889	// Fill source and destination buffer.
890	fillBuffer(vkd, device, srcBuffer, srcValues);
891	fillBuffer(vkd, device, dstBuffer, dstValues);
892
893	beginCommandBuffer(vkd, cmdBuffer);
894
895	// We need to preserve dynamic state data until the command buffer has run.
896	std::vector<de::MovePtr<DynamicStateData>> statesData;
897
898	for (size_t stateIdx = 0; stateIdx < m_params.states.size(); ++stateIdx)
899	{
900		// Get extra data needed for using the dynamic state.
901		const auto	offset		= elemSize * stateIdx;
902		const auto&	state		= m_params.states[stateIdx];
903		const auto	stateInfo	= getDynamicStateInfo(state);
904		statesData.push_back(getDynamicStateData(m_context, device, state, m_pipelineConstructionType));
905
906		// Record command if before.
907		if (m_params.whenToSet == WhenToSet::BEFORE)
908			stateInfo.recorder(&vkd, cmdBuffer, statesData.back().get());
909
910		// Transfer op (copy one buffer element per dynamic state).
911		const VkBufferCopy region = { offset, offset, elemSize };
912		vkd.cmdCopyBuffer(cmdBuffer, srcBuffer.get(), dstBuffer.get(), 1u, &region);
913
914		// Record command if after.
915		if (m_params.whenToSet == WhenToSet::AFTER)
916			stateInfo.recorder(&vkd, cmdBuffer, statesData.back().get());
917	}
918
919	const auto barrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
920	vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u, &barrier, 0u, nullptr, 0u, nullptr);
921
922	endCommandBuffer(vkd, cmdBuffer);
923	submitCommandsAndWait(vkd, device, queue, cmdBuffer);
924
925	// Invalidate alloc and check destination buffer.
926	auto& dstBufferAlloc = dstBuffer.getAllocation();
927	invalidateAlloc(vkd, device, dstBufferAlloc);
928
929	decltype(srcValues) results (srcValues.size());
930	deMemcpy(results.data(), dstBufferAlloc.getHostPtr(), de::dataSize(srcValues));
931
932	for (size_t valueIdx = 0; valueIdx < srcValues.size(); ++valueIdx)
933	{
934		const auto& orig	= srcValues[valueIdx];
935		const auto& res		= results[valueIdx];
936
937		if (orig != res)
938		{
939			std::ostringstream msg;
940			msg << "Unexpected value found in destination buffer at position " << valueIdx << " (found=" << res << " expected=" << orig << ")";
941			TCU_FAIL(msg.str());
942		}
943	}
944
945	return tcu::TestStatus::pass("Pass");
946}
947
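// Records one compute dispatch per dynamic state, setting the state before or after each dispatch; every dispatch writes a flag that is verified at the end.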
948tcu::TestStatus DynamicStateComputeInstance::iterateCompute (void)
949{
950	const auto&	vki			= m_context.getInstanceInterface();
951	const auto	phyDev		= m_context.getPhysicalDevice();
952	auto&		devHelper	= getDeviceHelper(m_context, m_params.states.at(0));
953	const auto&	vkd			= devHelper.getDeviceInterface();
954	const auto	device		= devHelper.getDevice();
955	const auto	qIndex		= devHelper.getQueueFamilyIndex();
956	const auto	queue		= devHelper.getQueue();
957	auto&		alloc		= devHelper.getAllocator();
958
959	const auto	cmdPool			= makeCommandPool(vkd, device, qIndex);
960	const auto	cmdBufferPtr	= allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
961	const auto	cmdBuffer		= cmdBufferPtr.get();
962
963	DescriptorSetLayoutBuilder setLayoutBuilder;
964	setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
965	const auto	setLayout		= setLayoutBuilder.build(vkd, device);
966
967	// Push constants.
968	const deUint32	pcSize		= static_cast<deUint32>(sizeof(deUint32));
969	const auto		pcRange		= makePushConstantRange(VK_SHADER_STAGE_COMPUTE_BIT, 0u, pcSize);
970
971	// Pipeline.
972	const VkPipelineLayoutCreateInfo layoutInfo =
973	{
974		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	//	VkStructureType					sType;
975		nullptr,										//	const void*						pNext;
976		0u,												//	VkPipelineLayoutCreateFlags		flags;
977		1u,												//	deUint32						setLayoutCount;
978		&setLayout.get(),								//	const VkDescriptorSetLayout*	pSetLayouts;
979		1u,												//	deUint32						pushConstantRangeCount;
980		&pcRange,										//	const VkPushConstantRange*		pPushConstantRanges;
981	};
982	const auto pipelineLayout = createPipelineLayout(vkd, device, &layoutInfo);
983
984	const auto shaderModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("comp"), 0u);
985
986	const VkPipelineShaderStageCreateInfo shaderStageInfo =
987	{
988		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	//	VkStructureType						sType;
989		nullptr,												//	const void*							pNext;
990		0u,														//	VkPipelineShaderStageCreateFlags	flags;
991		VK_SHADER_STAGE_COMPUTE_BIT,							//	VkShaderStageFlagBits				stage;
992		shaderModule.get(),										//	VkShaderModule						module;
993		"main",													//	const char*							pName;
994		nullptr,												//	const VkSpecializationInfo*			pSpecializationInfo;
995	};
996
997	const VkComputePipelineCreateInfo pipelineInfo =
998	{
999		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,	//	VkStructureType					sType;
1000		nullptr,										//	const void*						pNext;
1001		0u,												//	VkPipelineCreateFlags			flags;
1002		shaderStageInfo,								//	VkPipelineShaderStageCreateInfo	stage;
1003		pipelineLayout.get(),							//	VkPipelineLayout				layout;
1004		DE_NULL,										//	VkPipeline						basePipelineHandle;
1005		0,												//	deInt32							basePipelineIndex;
1006	};
1007	const auto pipeline = createComputePipeline(vkd, device, DE_NULL, &pipelineInfo);
1008
1009	DE_ASSERT(!m_params.states.empty());
1010
1011	// Output buffer with one value per state.
1012	std::vector<deUint32>	bufferData			(m_params.states.size(), 0u);
1013	const auto				dataSize			(de::dataSize(bufferData));
1014	const auto				outputBufferSize	= de::roundUp(static_cast<VkDeviceSize>(dataSize), getPhysicalDeviceProperties(vki, phyDev).limits.nonCoherentAtomSize);
1015	const auto				bufferCreateInfo	= makeBufferCreateInfo(outputBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
1016
1017	BufferWithMemory		outputBuffer		(vkd, device, alloc, bufferCreateInfo, MemoryRequirement::HostVisible);
1018	auto&					outputBufferAlloc	= outputBuffer.getAllocation();
1019	auto					outputBufferPtr		= outputBufferAlloc.getHostPtr();
1020
1021	deMemcpy(outputBufferPtr, bufferData.data(), dataSize);
1022	flushAlloc(vkd, device, outputBufferAlloc);
1023
1024	// Descriptor set.
1025	DescriptorPoolBuilder poolBuilder;
1026	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1027	const auto descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1028
1029	const auto descriptorSet = makeDescriptorSet(vkd, device, descriptorPool.get(), setLayout.get());
1030
1031	const auto bufferInfo = makeDescriptorBufferInfo(outputBuffer.get(), 0ull, outputBufferSize);
1032	DescriptorSetUpdateBuilder updateBuilder;
1033	updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferInfo);
1034	updateBuilder.update(vkd, device);
1035
1036	// Record and submit.
1037	beginCommandBuffer(vkd, cmdBuffer);
1038
1039	// We need to preserve dynamic state data until the command buffer has run.
1040	std::vector<de::MovePtr<DynamicStateData>> statesData;
1041
1042	for (size_t stateIdx = 0; stateIdx < m_params.states.size(); ++stateIdx)
1043	{
1044		// Objects needed to set the dynamic state.
1045		auto		state		= m_params.states[stateIdx];
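		// Shader objects only use the WITH_COUNT variants of the viewport and scissor states.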
1046		if (vk::isConstructionTypeShaderObject(m_pipelineConstructionType))
1047		{
1048			if (state == vk::VK_DYNAMIC_STATE_VIEWPORT)
1049				state = vk::VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT;
1050			if (state == vk::VK_DYNAMIC_STATE_SCISSOR)
1051				state = vk::VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT;
1052		}
1053
1054		const auto	stateInfo	= getDynamicStateInfo(state);
1055		statesData.push_back(getDynamicStateData(m_context, device, state, m_pipelineConstructionType));
1056
1057		if (m_params.whenToSet == WhenToSet::BEFORE)
1058			stateInfo.recorder(&vkd, cmdBuffer, statesData.back().get());
1059
1060		vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.get());
1061		vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
1062		{
1063			// Each state will write to a different buffer position.
1064			const deUint32 pcData = static_cast<deUint32>(stateIdx);
1065			vkd.cmdPushConstants(cmdBuffer, pipelineLayout.get(), VK_SHADER_STAGE_COMPUTE_BIT, 0u, pcSize, &pcData);
1066		}
1067		vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
1068
1069		if (m_params.whenToSet == WhenToSet::AFTER)
1070			stateInfo.recorder(&vkd, cmdBuffer, statesData.back().get());
1071	}
1072
1073	// Barrier to read buffer contents.
1074	const auto barrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
1075	vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u, &barrier, 0u, nullptr, 0u, nullptr);
1076
1077	endCommandBuffer(vkd, cmdBuffer);
1078	submitCommandsAndWait(vkd, device, queue, cmdBuffer);
1079
1080	// Read and verify buffer contents.
1081	invalidateAlloc(vkd, device, outputBufferAlloc);
1082	deMemcpy(bufferData.data(), outputBufferPtr, dataSize);
1083
1084	for (size_t idx = 0u; idx < bufferData.size(); ++idx)
1085	{
1086		if (bufferData[idx] != 1u)
1087		{
1088			std::ostringstream msg;
1089			msg << "Unexpected value found at buffer position " << idx << ": " << bufferData[idx];
1090			TCU_FAIL(msg.str());
1091		}
1092	}
1093
1094	return tcu::TestStatus::pass("Pass");
1095}
1096
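// Strips the VK_DYNAMIC_STATE_ prefix and returns the rest of the enum name in lower case (e.g. "viewport").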
1097std::string getDynamicStateBriefName (VkDynamicState state)
1098{
1099	const auto fullName		= de::toString(state);
1100	const auto prefixLen	= strlen("VK_DYNAMIC_STATE_");
1101
1102	return de::toLower(fullName.substr(prefixLen));
1103}
1104
1105} // anonymous
1106
1107tcu::TestCaseGroup* createDynamicStateComputeTests (tcu::TestContext& testCtx, vk::PipelineConstructionType pipelineConstructionType)
1108{
1109	using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
1110
1111	// Dynamic state mixed with compute and transfer operations
1112	GroupPtr mainGroup(new tcu::TestCaseGroup(testCtx, "compute_transfer"));
1113
1114	const struct
1115	{
1116		OperType	operationType;
1117		const char*	name;
1118	} operations[] =
1119	{
1120		{	OperType::COMPUTE,	"compute"	},
1121		{	OperType::TRANSFER,	"transfer"	},
1122	};
1123
1124	const struct
1125	{
1126		WhenToSet	when;
1127		const char*	name;
1128	} moments[] =
1129	{
1130		{	WhenToSet::BEFORE,	"before"	},
1131		{	WhenToSet::AFTER,	"after"		},
1132	};
1133
1134	// Tests with a single dynamic state.
1135	{
1136		GroupPtr singleStateGroup(new tcu::TestCaseGroup(testCtx, "single"));
1137
1138		for (int operIdx = 0; operIdx < DE_LENGTH_OF_ARRAY(operations); ++operIdx)
1139		{
1140			GroupPtr operationGroup(new tcu::TestCaseGroup(testCtx, operations[operIdx].name));
1141
1142			for (int stateIdx = 0; stateIdx < DE_LENGTH_OF_ARRAY(dynamicStateList); ++stateIdx)
1143			{
1144				const auto	state		= dynamicStateList[stateIdx];
1145				const auto	stateName	= getDynamicStateBriefName(state);
1146
1147				GroupPtr stateGroup(new tcu::TestCaseGroup(testCtx, stateName.c_str()));
1148
1149				for (int momentIdx = 0; momentIdx < DE_LENGTH_OF_ARRAY(moments); ++momentIdx)
1150				{
1151					const TestParams testParams =
1152					{
1153						operations[operIdx].operationType,		//	OperType					operationType;
1154						moments[momentIdx].when,				//	WhenToSet					whenToSet;
						std::vector<VkDynamicState>(1, state),	//	std::vector<VkDynamicState>	states;
1156					};
1157
1158					stateGroup->addChild(new DynamicStateComputeCase(testCtx, moments[momentIdx].name, testParams, pipelineConstructionType));
1159				}
1160
1161				operationGroup->addChild(stateGroup.release());
1162			}
1163
1164			singleStateGroup->addChild(operationGroup.release());
1165		}
1166
1167		mainGroup->addChild(singleStateGroup.release());
1168	}
1169
1170	// A few tests with several dynamic states.
1171	{
1172		GroupPtr multiStateGroup(new tcu::TestCaseGroup(testCtx, "multi"));
1173
1174		for (int operIdx = 0; operIdx < DE_LENGTH_OF_ARRAY(operations); ++operIdx)
1175		{
1176			GroupPtr operationGroup(new tcu::TestCaseGroup(testCtx, operations[operIdx].name));
1177
1178			for (int momentIdx = 0; momentIdx < DE_LENGTH_OF_ARRAY(moments); ++momentIdx)
1179			{
1180				TestParams testParams =
1181				{
1182					operations[operIdx].operationType,	//	OperType					operationType;
1183					moments[momentIdx].when,			//	WhenToSet					whenToSet;
1184					std::vector<VkDynamicState>(),		//	std::vector<VkDynamicState>	states;
1185				};
1186
1187				// Use the basic states so as not to introduce extra requirements.
1188				for (int stateIdx = 0; stateIdx < DE_LENGTH_OF_ARRAY(dynamicStateList); ++stateIdx)
1189				{
1190					testParams.states.push_back(dynamicStateList[stateIdx]);
1191					if (dynamicStateList[stateIdx] == VK_DYNAMIC_STATE_STENCIL_REFERENCE)
1192						break;
1193				}
1194
1195				operationGroup->addChild(new DynamicStateComputeCase(testCtx, moments[momentIdx].name, testParams, pipelineConstructionType));
1196			}
1197
1198			multiStateGroup->addChild(operationGroup.release());
1199		}
1200
1201		mainGroup->addChild(multiStateGroup.release());
1202	}
1203
1204	return mainGroup.release();
1205}
1206
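// Releases the lazily-created device helpers.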
1207void cleanupDevice()
1208{
1209	g_shadingRateDeviceHelper.reset(nullptr);
1210	g_contextDeviceHelper.reset(nullptr);
1211}
1212
1213} // DynamicState
1214} // vkt
1215