1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2023 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20 /*!
21 * \file
22 * \brief Video decoding tests
23 */
24 /*--------------------------------------------------------------------*/
25
26 #include "vktVideoDecodeTests.hpp"
27 #include "vktVideoTestUtils.hpp"
28 #include "vkBarrierUtil.hpp"
29 #include "vkObjUtil.hpp"
30
31 #include "tcuFunctionLibrary.hpp"
32 #include "tcuPlatform.hpp"
33 #include "tcuTestLog.hpp"
34
35 #include "vkCmdUtil.hpp"
36 #include "vkDefs.hpp"
37 #include "vkImageWithMemory.hpp"
38 #include "tcuCommandLine.hpp"
39
40 #include "vktVideoClipInfo.hpp"
41
42 #include <deDefs.h>
43
44 #ifdef DE_BUILD_VIDEO
45 #include "extESExtractor.hpp"
46 #include "vktVideoBaseDecodeUtils.hpp"
47
48 #include "extNvidiaVideoParserIf.hpp"
49 // FIXME: The samples repo is missing this internal include from their H265 decoder
50 #include "nvVulkanh265ScalingList.h"
51 #include <VulkanH264Decoder.h>
52 #include <VulkanH265Decoder.h>
53
54 #include <utility>
55 #endif
56
57 namespace vkt
58 {
59 namespace video
60 {
61
// Set FRAME_DUMP_DEBUG to 1 to have the decoded YCbCr frames written to the
// filesystem in the YV12 format.
// See the FRAME_DUMP_DEBUG sections below to change the output file name and
// related details.
65 #define FRAME_DUMP_DEBUG 0
66
67 namespace
68 {
69 using namespace vk;
70 using namespace std;
71
72 using de::MovePtr;
73
// Enumerates every decode test variant generated by this file. The "Case N"
// comments refer to the corresponding case numbering in the video test plan.
enum TestType
{
	TEST_TYPE_H264_DECODE_I, // Case 6
	TEST_TYPE_H264_DECODE_I_P, // Case 7
	TEST_TYPE_H264_DECODE_CLIP_A,
	TEST_TYPE_H264_DECODE_I_P_B_13, // Case 7a
	TEST_TYPE_H264_DECODE_I_P_NOT_MATCHING_ORDER, // Case 8
	TEST_TYPE_H264_DECODE_I_P_B_13_NOT_MATCHING_ORDER, // Case 8a
	TEST_TYPE_H264_DECODE_QUERY_RESULT_WITH_STATUS, // Case 9
	TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE, // Case 17
	TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE_DPB, // Case 18
	TEST_TYPE_H264_DECODE_INTERLEAVED, // Case 21
	TEST_TYPE_H264_BOTH_DECODE_ENCODE_INTERLEAVED, // Case 23 TODO
	TEST_TYPE_H264_H265_DECODE_INTERLEAVED, // Case 24

	TEST_TYPE_H265_DECODE_I, // Case 15
	TEST_TYPE_H265_DECODE_I_P, // Case 16
	TEST_TYPE_H265_DECODE_CLIP_D,
	TEST_TYPE_H265_DECODE_I_P_NOT_MATCHING_ORDER, // Case 16-2
	TEST_TYPE_H265_DECODE_I_P_B_13, // Case 16-3
	TEST_TYPE_H265_DECODE_I_P_B_13_NOT_MATCHING_ORDER, // Case 16-4

	// Number of test types; keep last.
	TEST_TYPE_LAST
};
98
getTestName(TestType type)99 const char* getTestName(TestType type)
100 {
101 const char* testName;
102 switch (type)
103 {
104 case TEST_TYPE_H264_DECODE_I:
105 testName = "h264_i";
106 break;
107 case TEST_TYPE_H264_DECODE_I_P:
108 testName = "h264_i_p";
109 break;
110 case TEST_TYPE_H264_DECODE_CLIP_A:
111 testName = "h264_420_8bit_high_176x144_30frames";
112 break;
113 case TEST_TYPE_H264_DECODE_I_P_NOT_MATCHING_ORDER:
114 testName = "h264_i_p_not_matching_order";
115 break;
116 case TEST_TYPE_H264_DECODE_I_P_B_13:
117 testName = "h264_i_p_b_13";
118 break;
119 case TEST_TYPE_H264_DECODE_I_P_B_13_NOT_MATCHING_ORDER:
120 testName = "h264_i_p_b_13_not_matching_order";
121 break;
122 case TEST_TYPE_H264_DECODE_QUERY_RESULT_WITH_STATUS:
123 testName = "h264_query_with_status";
124 break;
125 case TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE:
126 testName = "h264_resolution_change";
127 break;
128 case TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE_DPB:
129 testName = "h264_resolution_change_dpb";
130 break;
131 case TEST_TYPE_H264_DECODE_INTERLEAVED:
132 testName = "h264_interleaved";
133 break;
134 case TEST_TYPE_H264_H265_DECODE_INTERLEAVED:
135 testName = "h264_h265_interleaved";
136 break;
137 case TEST_TYPE_H265_DECODE_I:
138 testName = "h265_i";
139 break;
140 case TEST_TYPE_H265_DECODE_I_P:
141 testName = "h265_i_p";
142 break;
143 case TEST_TYPE_H265_DECODE_CLIP_D:
144 testName = "h265_420_8bit_main_176x144_30frames";
145 break;
146 case TEST_TYPE_H265_DECODE_I_P_NOT_MATCHING_ORDER:
147 testName = "h265_i_p_not_matching_order";
148 break;
149 case TEST_TYPE_H265_DECODE_I_P_B_13:
150 testName = "h265_i_p_b_13";
151 break;
152 case TEST_TYPE_H265_DECODE_I_P_B_13_NOT_MATCHING_ORDER:
153 testName = "h265_i_p_b_13_not_matching_order";
154 break;
155 default:
156 TCU_THROW(InternalError, "Unknown TestType");
157 }
158 return testName;
159 }
160
161
// Bit flags modifying how the decoder is driven for a test case.
enum DecoderOption : deUint32
{
	// The default is to do nothing additional to ordinary playback.
	Default = 0,
	// All decode operations will have their status checked for success (Q2 2023: not all vendors support these)
	UseStatusQueries = 1 << 0,
	// Do not play back the clip in the normal fashion; instead cache the decode parameters for later processing.
	// This is primarily used to support out-of-order submission test cases, and per-GOP handling.
	CachedDecoding = 1 << 1,
	// When a parameter object changes the resolution of the test content, and the new video session would otherwise
	// still be compatible with the last session (for example, larger decode surfaces preceding smaller decode surfaces,
	// a frame downsize), force the session to be recreated anyway.
	RecreateDPBImages = 1 << 2,
};
// Sentinel frame count meaning "check every frame of the clip"; resolved to
// the clip's total frame count in the TestDefinition constructor.
static const int ALL_FRAMES = 0;

// Per-stream parameters shared by all decode test cases.
struct BaseDecodeParam
{
	ClipName clip;                // Which test clip to decode.
	int framesToCheck;            // Number of leading frames to verify, or ALL_FRAMES.
	DecoderOption decoderOptions; // Bitwise OR of DecoderOption flags.
};
184
// A single-stream decode test case: the test type plus its stream parameters.
struct DecodeTestParam
{
	TestType type;
	BaseDecodeParam stream;

} g_DecodeTests[] = {
	{TEST_TYPE_H264_DECODE_I, {CLIP_A, 1, DecoderOption::Default}},
	{TEST_TYPE_H264_DECODE_I_P, {CLIP_A, 2, DecoderOption::Default}},
	{TEST_TYPE_H264_DECODE_I_P_NOT_MATCHING_ORDER, {CLIP_A, 2, DecoderOption::CachedDecoding}},
	{TEST_TYPE_H264_DECODE_CLIP_A, {CLIP_A, ALL_FRAMES, DecoderOption::Default}},
	{TEST_TYPE_H264_DECODE_I_P_B_13, {CLIP_H264_4K_26_IBP_MAIN, ALL_FRAMES, DecoderOption::Default}},
	{TEST_TYPE_H264_DECODE_I_P_B_13_NOT_MATCHING_ORDER, {CLIP_H264_4K_26_IBP_MAIN, ALL_FRAMES, DecoderOption::CachedDecoding}},
	{TEST_TYPE_H264_DECODE_QUERY_RESULT_WITH_STATUS, {CLIP_A, ALL_FRAMES, DecoderOption::UseStatusQueries}},
	{TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE, {CLIP_C, ALL_FRAMES, DecoderOption::Default}},
	{TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE_DPB, {CLIP_C, ALL_FRAMES, DecoderOption::RecreateDPBImages}},

	{TEST_TYPE_H265_DECODE_I, {CLIP_D, 1, DecoderOption::Default}},
	{TEST_TYPE_H265_DECODE_I_P, {CLIP_D, 2, DecoderOption::Default}},
	{TEST_TYPE_H265_DECODE_I_P_NOT_MATCHING_ORDER, {CLIP_D, 2, DecoderOption::CachedDecoding}},
	{TEST_TYPE_H265_DECODE_I_P_B_13, {CLIP_JELLY_HEVC, ALL_FRAMES, DecoderOption::Default}},
	{TEST_TYPE_H265_DECODE_I_P_B_13_NOT_MATCHING_ORDER, {CLIP_JELLY_HEVC, ALL_FRAMES, DecoderOption::CachedDecoding}},
	{TEST_TYPE_H265_DECODE_CLIP_D, {CLIP_D, ALL_FRAMES, DecoderOption::Default}},
};
208
// An interleaved decode test case: two streams decoded through the same
// device with their command buffer recording/submission interleaved. Both
// streams use cached decoding so submissions can be reordered.
struct InterleavingDecodeTestParams
{
	TestType type;
	BaseDecodeParam streamA;
	BaseDecodeParam streamB;
} g_InterleavingTests[] = {
	{TEST_TYPE_H264_DECODE_INTERLEAVED, {CLIP_A, ALL_FRAMES, DecoderOption::CachedDecoding}, {CLIP_A, ALL_FRAMES, DecoderOption::CachedDecoding}},
	{TEST_TYPE_H264_H265_DECODE_INTERLEAVED, {CLIP_A, ALL_FRAMES, DecoderOption::CachedDecoding}, {CLIP_D, ALL_FRAMES, DecoderOption::CachedDecoding}},
};
218
219 class TestDefinition
220 {
221 public:
create(DecodeTestParam params, deUint32 baseSeed)222 static MovePtr<TestDefinition> create(DecodeTestParam params, deUint32 baseSeed)
223 {
224 return MovePtr<TestDefinition>(new TestDefinition(params, baseSeed));
225 }
226
TestDefinition(DecodeTestParam params, deUint32 baseSeed)227 TestDefinition(DecodeTestParam params, deUint32 baseSeed)
228 : m_params(params)
229 , m_info(clipInfo(params.stream.clip))
230 , m_hash(baseSeed)
231 {
232 m_profile = VkVideoCoreProfile(m_info->profile.codecOperation, m_info->profile.subsamplingFlags, m_info->profile.lumaBitDepth, m_info->profile.chromaBitDepth, m_info->profile.profileIDC);
233 if (m_params.stream.framesToCheck == ALL_FRAMES)
234 {
235 m_params.stream.framesToCheck = m_info->totalFrames;
236 }
237 if (params.type == TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE_DPB)
238 {
239 m_pictureParameterUpdateTriggerHack = 3;
240 }
241 }
242
getTestType() const243 TestType getTestType() const
244 {
245 return m_params.type;
246 }
247
getClipFilename() const248 const char* getClipFilename() const
249 {
250 return m_info->filename;
251 }
252
getClipInfo() const253 const ClipInfo* getClipInfo() const
254 {
255 return m_info;
256 };
257
getCodecOperation() const258 VkVideoCodecOperationFlagBitsKHR getCodecOperation() const
259 {
260 return m_profile.GetCodecType();
261 }
getProfile() const262 const VkVideoCoreProfile* getProfile() const
263 {
264 return &m_profile;
265 }
266
framesToCheck() const267 int framesToCheck() const
268 {
269 return m_params.stream.framesToCheck;
270 }
271
hasOption(DecoderOption o) const272 bool hasOption(DecoderOption o) const
273 {
274 return (m_params.stream.decoderOptions & o) != 0;
275 }
276
getParamaterUpdateHackRequirement() const277 int getParamaterUpdateHackRequirement() const
278 {
279 return m_pictureParameterUpdateTriggerHack;
280 }
281
requiredDeviceFlags() const282 VideoDevice::VideoDeviceFlags requiredDeviceFlags() const
283 {
284 return VideoDevice::VIDEO_DEVICE_FLAG_REQUIRE_SYNC2_OR_NOT_SUPPORTED |
285 (hasOption(DecoderOption::UseStatusQueries) ? VideoDevice::VIDEO_DEVICE_FLAG_QUERY_WITH_STATUS_FOR_DECODE_SUPPORT : VideoDevice::VIDEO_DEVICE_FLAG_NONE);
286 }
287
extensionProperties() const288 const VkExtensionProperties* extensionProperties() const
289 {
290 static const VkExtensionProperties h264StdExtensionVersion = {
291 VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_EXTENSION_NAME, VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_SPEC_VERSION};
292 static const VkExtensionProperties h265StdExtensionVersion = {
293 VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_EXTENSION_NAME, VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_SPEC_VERSION};
294
295 switch (m_profile.GetCodecType())
296 {
297 case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR:
298 return &h264StdExtensionVersion;
299 case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR:
300 return &h265StdExtensionVersion;
301 default:
302 tcu::die("Unsupported video codec %s\n", util::codecToName(m_profile.GetCodecType()));
303 break;
304 }
305
306 TCU_THROW(InternalError, "Unsupported codec");
307 };
308
updateHash(deUint32 baseHash)309 void updateHash(deUint32 baseHash)
310 {
311 m_hash = deUint32Hash(baseHash);
312 }
313
314 private:
315 DecodeTestParam m_params;
316 const ClipInfo* m_info{};
317 deUint32 m_hash{};
318 VkVideoCoreProfile m_profile;
319 // The 1-based count of parameter set updates after which to force a parameter object release.
320 // This is required due to the design of the NVIDIA decode-client API. It sends parameter updates and expects constructed parameter
321 // objects back synchronously, before the next video session is created in a following BeginSequence call.
322 int m_pictureParameterUpdateTriggerHack{0}; // Zero is "off"
323 };
324
325
// Vulkan video is not supported on the android platform;
// all external libraries, helper functions and test instances have been excluded.
#ifdef DE_BUILD_VIDEO
// Shared handle to the NVIDIA-provided bitstream parser implementation.
using VkVideoParser = VkSharedBaseObj<VulkanVideoDecodeParser>;
330
createParser(const TestDefinition* params, VideoBaseDecoder* decoder, VkSharedBaseObj<VulkanVideoDecodeParser>& parser)331 void createParser(const TestDefinition* params, VideoBaseDecoder* decoder, VkSharedBaseObj<VulkanVideoDecodeParser>& parser)
332 {
333 VkVideoCapabilitiesKHR videoCaps{};
334 VkVideoDecodeCapabilitiesKHR videoDecodeCaps{};
335 util::getVideoDecodeCapabilities(*decoder->m_deviceContext, decoder->m_profile, videoCaps, videoDecodeCaps);
336
337 const VkParserInitDecodeParameters pdParams = {
338 NV_VULKAN_VIDEO_PARSER_API_VERSION,
339 dynamic_cast<VkParserVideoDecodeClient*>(decoder),
340 static_cast<deUint32>(2 * 1024 * 1024), // 2MiB is the default bitstream buffer size
341 static_cast<deUint32>(videoCaps.minBitstreamBufferOffsetAlignment),
342 static_cast<deUint32>(videoCaps.minBitstreamBufferSizeAlignment),
343 0,
344 0,
345 nullptr,
346 true,
347 };
348
349 if (videoLoggingEnabled())
350 {
351 tcu::print("Creating a parser with offset alignment=%d and size alignment=%d\n",
352 static_cast<deUint32>(videoCaps.minBitstreamBufferOffsetAlignment),
353 static_cast<deUint32>(videoCaps.minBitstreamBufferSizeAlignment));
354 }
355
356 const VkExtensionProperties* pStdExtensionVersion = params->extensionProperties();
357 DE_ASSERT(pStdExtensionVersion);
358
359 switch (params->getCodecOperation())
360 {
361 case VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR:
362 {
363 if (strcmp(pStdExtensionVersion->extensionName, VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_EXTENSION_NAME) || pStdExtensionVersion->specVersion != VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_SPEC_VERSION)
364 {
365 tcu::die("The requested decoder h.264 Codec STD version is NOT supported. The supported decoder h.264 Codec STD version is version %d of %s\n",
366 VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_SPEC_VERSION,
367 VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_EXTENSION_NAME);
368 }
369 VkSharedBaseObj<VulkanH264Decoder> nvVideoH264DecodeParser(new VulkanH264Decoder(params->getCodecOperation()));
370 parser = nvVideoH264DecodeParser;
371 break;
372 }
373 case VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR:
374 {
375 if (strcmp(pStdExtensionVersion->extensionName, VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_EXTENSION_NAME) || pStdExtensionVersion->specVersion != VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_SPEC_VERSION)
376 {
377 tcu::die("The requested decoder h.265 Codec STD version is NOT supported. The supported decoder h.265 Codec STD version is version %d of %s\n",
378 VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_SPEC_VERSION,
379 VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_EXTENSION_NAME);
380 }
381 VkSharedBaseObj<VulkanH265Decoder> nvVideoH265DecodeParser(new VulkanH265Decoder(params->getCodecOperation()));
382 parser = nvVideoH265DecodeParser;
383 break;
384 }
385 default:
386 TCU_FAIL("Unsupported codec type!");
387 }
388
389 VK_CHECK(parser->Initialize(&pdParams));
390 }
391
decoderFromTestDefinition(DeviceContext* devctx, const TestDefinition& test)392 static MovePtr<VideoBaseDecoder> decoderFromTestDefinition(DeviceContext* devctx, const TestDefinition& test)
393 {
394 VkSharedBaseObj<VulkanVideoFrameBuffer> vkVideoFrameBuffer;
395 VK_CHECK(VulkanVideoFrameBuffer::Create(devctx,
396 test.hasOption(DecoderOption::UseStatusQueries),
397 vkVideoFrameBuffer));
398
399 VideoBaseDecoder::Parameters params;
400 params.profile = test.getProfile();
401 params.context = devctx;
402 params.framebuffer = vkVideoFrameBuffer;
403 params.framesToCheck = test.framesToCheck();
404 params.queryDecodeStatus = test.hasOption(DecoderOption::UseStatusQueries);
405 params.outOfOrderDecoding = test.hasOption(DecoderOption::CachedDecoding);
406 params.alwaysRecreateDPB = test.hasOption(DecoderOption::RecreateDPBImages);
407 params.pictureParameterUpdateTriggerHack = test.getParamaterUpdateHackRequirement();
408
409 return MovePtr<VideoBaseDecoder>(new VideoBaseDecoder(std::move(params)));
410 }
411
412 class FrameProcessor
413 {
414 public:
415 static const int DECODER_QUEUE_SIZE = 6;
416
FrameProcessor(DeviceContext* devctx, const TestDefinition* params, VideoBaseDecoder* decoder, tcu::TestLog& log)417 FrameProcessor(DeviceContext* devctx, const TestDefinition* params, VideoBaseDecoder* decoder, tcu::TestLog& log)
418 : m_devctx(devctx)
419 , m_demuxer(params->getClipFilename(), log)
420 , m_decoder(decoder)
421 , m_frameData(DECODER_QUEUE_SIZE)
422 , m_frameDataIdx(0)
423 {
424 createParser(params, m_decoder, m_parser);
425 for (auto& frame : m_frameData)
426 frame.Reset();
427 }
428
parseNextChunk()429 void parseNextChunk()
430 {
431 deUint8* pData = 0;
432 deInt64 size = 0;
433 bool demuxerSuccess = m_demuxer.Demux(&pData, &size);
434
435 VkParserBitstreamPacket pkt;
436 pkt.pByteStream = pData; // Ptr to byte stream data decode/display event
437 pkt.nDataLength = size; // Data length for this packet
438 pkt.llPTS = 0; // Presentation Time Stamp for this packet (clock rate specified at initialization)
439 pkt.bEOS = !demuxerSuccess; // true if this is an End-Of-Stream packet (flush everything)
440 pkt.bPTSValid = false; // true if llPTS is valid (also used to detect frame boundaries for VC1 SP/MP)
441 pkt.bDiscontinuity = false; // true if DecMFT is signalling a discontinuity
442 pkt.bPartialParsing = 0; // 0: parse entire packet, 1: parse until next
443 pkt.bEOP = false; // true if the packet in pByteStream is exactly one frame
444 pkt.pbSideData = nullptr; // Auxiliary encryption information
445 pkt.nSideDataLength = 0; // Auxiliary encrypton information length
446
447 size_t parsedBytes = 0;
448 const bool parserSuccess = m_parser->ParseByteStream(&pkt, &parsedBytes);
449 if (videoLoggingEnabled())
450 std::cout << "Parsed " << parsedBytes << " bytes from bitstream" << std::endl;
451
452 m_videoStreamHasEnded = !(demuxerSuccess && parserSuccess);
453 }
454
getNextFrame(DecodedFrame* pFrame)455 int getNextFrame(DecodedFrame* pFrame)
456 {
457 // The below call to DequeueDecodedPicture allows returning the next frame without parsing of the stream.
458 // Parsing is only done when there are no more frames in the queue.
459 int32_t framesInQueue = m_decoder->GetVideoFrameBuffer()->DequeueDecodedPicture(pFrame);
460
461 // Loop until a frame (or more) is parsed and added to the queue.
462 while ((framesInQueue == 0) && !m_videoStreamHasEnded)
463 {
464 parseNextChunk();
465 framesInQueue = m_decoder->GetVideoFrameBuffer()->DequeueDecodedPicture(pFrame);
466 }
467
468 if ((framesInQueue == 0) && m_videoStreamHasEnded)
469 {
470 return -1;
471 }
472
473 return framesInQueue;
474 }
475
decodeFrame()476 const DecodedFrame* decodeFrame()
477 {
478 auto& vk = m_devctx->getDeviceDriver();
479 auto device = m_devctx->device;
480 DecodedFrame* pLastDecodedFrame = &m_frameData[m_frameDataIdx];
481
482 // Make sure the frame complete fence signaled (video frame is processed) before returning the frame.
483 if (pLastDecodedFrame->frameCompleteFence != VK_NULL_HANDLE)
484 {
485 VK_CHECK(vk.waitForFences(device, 1, &pLastDecodedFrame->frameCompleteFence, true, TIMEOUT_100ms));
486 VK_CHECK(vk.getFenceStatus(device, pLastDecodedFrame->frameCompleteFence));
487 }
488
489 m_decoder->ReleaseDisplayedFrame(pLastDecodedFrame);
490 pLastDecodedFrame->Reset();
491
492 TCU_CHECK_MSG(getNextFrame(pLastDecodedFrame) > 0, "Unexpected decode result");
493 TCU_CHECK_MSG(pLastDecodedFrame, "Unexpected decode result");
494
495 if (videoLoggingEnabled())
496 std::cout << "<= Wait on picIdx: " << pLastDecodedFrame->pictureIndex
497 << "\t\tdisplayWidth: " << pLastDecodedFrame->displayWidth
498 << "\t\tdisplayHeight: " << pLastDecodedFrame->displayHeight
499 << "\t\tdisplayOrder: " << pLastDecodedFrame->displayOrder
500 << "\tdecodeOrder: " << pLastDecodedFrame->decodeOrder
501 << "\ttimestamp " << pLastDecodedFrame->timestamp
502 << "\tdstImageView " << (pLastDecodedFrame->outputImageView ? pLastDecodedFrame->outputImageView->GetImageResource()->GetImage() : VK_NULL_HANDLE)
503 << std::endl;
504
505 m_frameDataIdx = (m_frameDataIdx + 1) % m_frameData.size();
506 return pLastDecodedFrame;
507 }
508
bufferFrames(int framesToDecode)509 void bufferFrames(int framesToDecode)
510 {
511 // This loop is for the out-of-order submissions cases. First all the frame information is gathered from the parser<->decoder loop
512 // then the command buffers are recorded in a random order, as well as the queue submissions, depending on the configuration of
513 // the test.
514 // NOTE: For this sequence to work, the frame buffer must have enough decode surfaces for the GOP intended for decode, otherwise
515 // picture allocation will fail pretty quickly! See m_numDecodeSurfaces, m_maxDecodeFramesCount
516 // The previous CTS cases were not actually randomizing the queue submission order (despite claiming too!)
517 DE_ASSERT(m_decoder->m_outOfOrderDecoding);
518 do
519 {
520 parseNextChunk();
521 size_t decodedFrames = m_decoder->GetVideoFrameBuffer()->GetDisplayedFrameCount();
522 if (decodedFrames == framesToDecode)
523 break;
524 }
525 while (!m_videoStreamHasEnded);
526 DE_ASSERT(m_decoder->m_cachedDecodeParams.size() == framesToDecode);
527 }
528
getBufferedDisplayCount() const529 int getBufferedDisplayCount() const { return m_decoder->GetVideoFrameBuffer()->GetDisplayedFrameCount(); }
530 private:
531 DeviceContext* m_devctx;
532 ESEDemuxer m_demuxer;
533 VkVideoParser m_parser;
534 VideoBaseDecoder* m_decoder;
535
536 std::vector<DecodedFrame> m_frameData;
537 size_t m_frameDataIdx{};
538
539 bool m_videoStreamHasEnded{false};
540 };
541
// Downloads the decoded frame from the video output image into a host-side
// MultiPlaneImageData so its content can be checksummed.
//
// 'layout' is the layout the image is in on entry (DPB or DST, depending on
// whether decode output coincides with the DPB); the image is returned to
// that layout before the function exits. Because decode and transfer happen
// on different queue families, the copy is bracketed by a release/acquire
// queue-family ownership transfer, chained with a semaphore.
de::MovePtr<vkt::ycbcr::MultiPlaneImageData> getDecodedImage(DeviceContext& devctx,
															 VkImageLayout layout,
															 const DecodedFrame* frame)
{
	auto& vkd = devctx.getDeviceDriver();
	auto device = devctx.device;
	auto queueFamilyIndexDecode = devctx.decodeQueueFamilyIdx();
	auto queueFamilyIndexTransfer = devctx.transferQueueFamilyIdx();
	const VkExtent2D imageExtent{(deUint32)frame->displayWidth, (deUint32)frame->displayHeight};
	const VkImage image = frame->outputImageView->GetImageResource()->GetImage();
	const VkFormat format = frame->outputImageView->GetImageResource()->GetImageCreateInfo().format;
	const uint32_t videoImageLayerIndex = frame->imageLayerIndex;

	MovePtr<vkt::ycbcr::MultiPlaneImageData> multiPlaneImageData(new vkt::ycbcr::MultiPlaneImageData(format, tcu::UVec2(imageExtent.width, imageExtent.height)));
	const VkQueue queueDecode = getDeviceQueue(vkd, device, queueFamilyIndexDecode, 0u);
	const VkQueue queueTransfer = getDeviceQueue(vkd, device, queueFamilyIndexTransfer, 0u);
	const VkImageSubresourceRange imageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, videoImageLayerIndex, 1);
	// Transition the decoded layer from 'layout' to GENERAL after decode writes.
	const VkImageMemoryBarrier2KHR imageBarrierDecode = makeImageMemoryBarrier2(VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR,
																				VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR,
																				VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR,
																				VK_ACCESS_NONE_KHR,
																				layout,
																				VK_IMAGE_LAYOUT_GENERAL,
																				image,
																				imageSubresourceRange);
	// Release ownership from the decode queue family...
	const VkImageMemoryBarrier2KHR imageBarrierOwnershipDecode = makeImageMemoryBarrier2(VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR,
																						 VK_ACCESS_NONE_KHR,
																						 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR,
																						 VK_ACCESS_NONE_KHR,
																						 VK_IMAGE_LAYOUT_GENERAL,
																						 VK_IMAGE_LAYOUT_GENERAL,
																						 image,
																						 imageSubresourceRange,
																						 queueFamilyIndexDecode,
																						 queueFamilyIndexTransfer);
	// ...and the matching acquire on the transfer queue family.
	const VkImageMemoryBarrier2KHR imageBarrierOwnershipTransfer = makeImageMemoryBarrier2(VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR,
																						   VK_ACCESS_NONE_KHR,
																						   VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR,
																						   VK_ACCESS_NONE_KHR,
																						   VK_IMAGE_LAYOUT_GENERAL,
																						   VK_IMAGE_LAYOUT_GENERAL,
																						   image,
																						   imageSubresourceRange,
																						   queueFamilyIndexDecode,
																						   queueFamilyIndexTransfer);
	// Transition to TRANSFER_SRC_OPTIMAL for the host download.
	const VkImageMemoryBarrier2KHR imageBarrierTransfer = makeImageMemoryBarrier2(VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR,
																				  VK_ACCESS_2_TRANSFER_READ_BIT_KHR,
																				  VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR,
																				  VK_ACCESS_NONE_KHR,
																				  VK_IMAGE_LAYOUT_GENERAL,
																				  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
																				  image,
																				  imageSubresourceRange);
	const Move<VkCommandPool> cmdDecodePool(makeCommandPool(vkd, device, queueFamilyIndexDecode));
	const Move<VkCommandBuffer> cmdDecodeBuffer(allocateCommandBuffer(vkd, device, *cmdDecodePool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const Move<VkCommandPool> cmdTransferPool(makeCommandPool(vkd, device, queueFamilyIndexTransfer));
	const Move<VkCommandBuffer> cmdTransferBuffer(allocateCommandBuffer(vkd, device, *cmdTransferPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	Move<VkSemaphore> semaphore = createSemaphore(vkd, device);
	Move<VkFence> decodeFence = createFence(vkd, device);
	Move<VkFence> transferFence = createFence(vkd, device);
	VkFence fences[] = {*decodeFence, *transferFence};
	const VkPipelineStageFlags waitDstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
	// Decode-queue submission: executes the release barriers, then signals
	// 'semaphore' to unblock the transfer-queue submission.
	VkSubmitInfo decodeSubmitInfo{
		VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		0u, // deUint32 waitSemaphoreCount;
		DE_NULL, // const VkSemaphore* pWaitSemaphores;
		DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
		1u, // deUint32 commandBufferCount;
		&*cmdDecodeBuffer, // const VkCommandBuffer* pCommandBuffers;
		1u, // deUint32 signalSemaphoreCount;
		&*semaphore, // const VkSemaphore* pSignalSemaphores;
	};
	// If the frame carries a completion semaphore, wait on it so the decode
	// write is finished before the barriers execute.
	if (frame->frameCompleteSemaphore != VK_NULL_HANDLE)
	{
		decodeSubmitInfo.waitSemaphoreCount = 1;
		decodeSubmitInfo.pWaitSemaphores = &frame->frameCompleteSemaphore;
		decodeSubmitInfo.pWaitDstStageMask = &waitDstStageMask;
	}
	// Transfer-queue submission: waits on 'semaphore', then performs the
	// acquire + layout transition for the download.
	const VkSubmitInfo transferSubmitInfo{
		VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		1u, // deUint32 waitSemaphoreCount;
		&*semaphore, // const VkSemaphore* pWaitSemaphores;
		&waitDstStageMask, // const VkPipelineStageFlags* pWaitDstStageMask;
		1u, // deUint32 commandBufferCount;
		&*cmdTransferBuffer, // const VkCommandBuffer* pCommandBuffers;
		0u, // deUint32 signalSemaphoreCount;
		DE_NULL, // const VkSemaphore* pSignalSemaphores;
	};

	DEBUGLOG(std::cout << "getDecodedImage: " << image << " " << layout << std::endl);

	beginCommandBuffer(vkd, *cmdDecodeBuffer, 0u);
	cmdPipelineImageMemoryBarrier2(vkd, *cmdDecodeBuffer, &imageBarrierDecode);
	cmdPipelineImageMemoryBarrier2(vkd, *cmdDecodeBuffer, &imageBarrierOwnershipDecode);
	endCommandBuffer(vkd, *cmdDecodeBuffer);

	beginCommandBuffer(vkd, *cmdTransferBuffer, 0u);
	cmdPipelineImageMemoryBarrier2(vkd, *cmdTransferBuffer, &imageBarrierOwnershipTransfer);
	cmdPipelineImageMemoryBarrier2(vkd, *cmdTransferBuffer, &imageBarrierTransfer);
	endCommandBuffer(vkd, *cmdTransferBuffer);

	VK_CHECK(vkd.queueSubmit(queueDecode, 1u, &decodeSubmitInfo, *decodeFence));
	VK_CHECK(vkd.queueSubmit(queueTransfer, 1u, &transferSubmitInfo, *transferFence));

	VK_CHECK(vkd.waitForFences(device, DE_LENGTH_OF_ARRAY(fences), fences, DE_TRUE, ~0ull));

	// Copy the image content to host memory.
	vkt::ycbcr::downloadImage(vkd, device, queueFamilyIndexTransfer, devctx.allocator(), image,
							  multiPlaneImageData.get(), 0, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
							  videoImageLayerIndex);

	// Return the image to its original layout for further decoder use.
	const VkImageMemoryBarrier2KHR imageBarrierTransfer2 = makeImageMemoryBarrier2(VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR,
																				   VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR,
																				   VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR,
																				   VK_ACCESS_NONE_KHR,
																				   VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
																				   layout,
																				   image,
																				   imageSubresourceRange);

	vkd.resetCommandBuffer(*cmdTransferBuffer, 0u);
	vkd.resetFences(device, 1, &*transferFence);
	beginCommandBuffer(vkd, *cmdTransferBuffer, 0u);
	cmdPipelineImageMemoryBarrier2(vkd, *cmdTransferBuffer, &imageBarrierTransfer2);
	endCommandBuffer(vkd, *cmdTransferBuffer);

	const VkSubmitInfo transferSubmitInfo2{
		VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		0u, // deUint32 waitSemaphoreCount;
		DE_NULL, // const VkSemaphore* pWaitSemaphores;
		DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
		1u, // deUint32 commandBufferCount;
		&*cmdTransferBuffer, // const VkCommandBuffer* pCommandBuffers;
		0u, // deUint32 signalSemaphoreCount;
		DE_NULL, // const VkSemaphore* pSignalSemaphores;
	};

	VK_CHECK(vkd.queueSubmit(queueTransfer, 1u, &transferSubmitInfo2, *transferFence));
	VK_CHECK(vkd.waitForFences(device, 1, &*transferFence, DE_TRUE, ~0ull));

	return multiPlaneImageData;
}
686
687
// Test instance decoding a single stream and verifying frame checksums.
class VideoDecodeTestInstance : public VideoBaseTestInstance
{
public:
	VideoDecodeTestInstance(Context& context, const TestDefinition* testDefinition);
	tcu::TestStatus iterate(void);

protected:
	const TestDefinition* m_testDefinition;
	MovePtr<VideoBaseDecoder> m_decoder{};
	// Keep DeviceContext small; it is copied into helper structures by value elsewhere.
	static_assert(sizeof(DeviceContext) < 128, "DeviceContext has grown bigger than expected!");
	DeviceContext m_deviceContext;
};
700
// Test instance decoding several streams with interleaved command recording
// and submission, one decoder per stream, on a shared device.
class InterleavingDecodeTestInstance : public VideoBaseTestInstance
{
public:
	InterleavingDecodeTestInstance(Context& context, const std::vector<MovePtr<TestDefinition>>& testDefinitions);
	tcu::TestStatus iterate(void);

protected:
	// NOTE(review): reference member — assumes the caller-owned vector (held by
	// the test case) outlives this instance; verify against the caller.
	const std::vector<MovePtr<TestDefinition>>& m_testDefinitions;
	std::vector<MovePtr<VideoBaseDecoder>> m_decoders{};
	// Keep DeviceContext small; it is copied into helper structures by value elsewhere.
	static_assert(sizeof(DeviceContext) < 128, "DeviceContext has grown bigger than expected!");
	DeviceContext m_deviceContext;
};
713
InterleavingDecodeTestInstance(Context& context, const std::vector<MovePtr<TestDefinition>>& testDefinitions)714 InterleavingDecodeTestInstance::InterleavingDecodeTestInstance(Context& context, const std::vector<MovePtr<TestDefinition>>& testDefinitions)
715 : VideoBaseTestInstance(context), m_testDefinitions(std::move(testDefinitions))
716 {
717 int requiredCodecs = VK_VIDEO_CODEC_OPERATION_NONE_KHR;
718 VideoDevice::VideoDeviceFlags requiredDeviceFlags = VideoDevice::VideoDeviceFlagBits::VIDEO_DEVICE_FLAG_NONE;
719 for (const auto& test : m_testDefinitions)
720 {
721 VkVideoCodecOperationFlagBitsKHR testBits = test->getCodecOperation();
722 requiredCodecs |= testBits;
723 requiredDeviceFlags |= test->requiredDeviceFlags();
724 }
725 VkDevice device = getDeviceSupportingQueue(VK_QUEUE_VIDEO_DECODE_BIT_KHR | VK_QUEUE_TRANSFER_BIT, requiredCodecs, requiredDeviceFlags);
726
727 m_deviceContext.context = &m_context;
728 m_deviceContext.device = device;
729 m_deviceContext.phys = m_context.getPhysicalDevice();
730 m_deviceContext.vd = &m_videoDevice;
731 // TODO: Support for multiple queues / multithreading
732 m_deviceContext.transferQueue =
733 getDeviceQueue(m_context.getDeviceInterface(), device, m_videoDevice.getQueueFamilyIndexTransfer(), 0);
734 m_deviceContext.decodeQueue =
735 getDeviceQueue(m_context.getDeviceInterface(), device, m_videoDevice.getQueueFamilyIndexDecode(), 0);
736
737 for (const auto& test : m_testDefinitions)
738 m_decoders.push_back(decoderFromTestDefinition(&m_deviceContext, *test));
739 }
740
VideoDecodeTestInstance(Context& context, const TestDefinition* testDefinition)741 VideoDecodeTestInstance::VideoDecodeTestInstance(Context& context, const TestDefinition* testDefinition)
742 : VideoBaseTestInstance(context), m_testDefinition(testDefinition)
743 {
744 VkDevice device = getDeviceSupportingQueue(VK_QUEUE_VIDEO_DECODE_BIT_KHR | VK_QUEUE_TRANSFER_BIT,
745 m_testDefinition->getCodecOperation(),
746 m_testDefinition->requiredDeviceFlags());
747
748 m_deviceContext.context = &m_context;
749 m_deviceContext.device = device;
750 m_deviceContext.phys = m_context.getPhysicalDevice();
751 m_deviceContext.vd = &m_videoDevice;
752 // TODO: Support for multiple queues / multithreading
753 m_deviceContext.transferQueue =
754 getDeviceQueue(m_context.getDeviceInterface(), device, m_videoDevice.getQueueFamilyIndexTransfer(), 0);
755 m_deviceContext.decodeQueue =
756 getDeviceQueue(m_context.getDeviceInterface(), device, m_videoDevice.getQueueFamilyIndexDecode(), 0);
757
758 m_decoder = decoderFromTestDefinition(&m_deviceContext, *m_testDefinition);
759 }
760
// Decode the test clip frame by frame and compare every decoded image against
// the per-frame reference checksum stored with the clip. Passes only when all
// of framesToCheck() frames match; otherwise fails with a list of the smaller
// of the correct/incorrect frame-number sets.
tcu::TestStatus VideoDecodeTestInstance::iterate()
{
#if FRAME_DUMP_DEBUG
	// Debug aid only: dump each decoded frame (converted to YV12) to disk.
#ifdef _WIN32
	FILE* output = fopen("C:\\output.yuv", "wb");
#else
	FILE* output = fopen("/tmp/output.yuv", "wb");
#endif
#endif

	FrameProcessor processor(&m_deviceContext, m_testDefinition, m_decoder.get(), m_context.getTestContext().getLog());
	std::vector<int> incorrectFrames;
	std::vector<int> correctFrames;

	// For cached-decoding tests, queue up all frames first and let the decoder
	// replay the cached operations out of display order before checking output.
	if (m_testDefinition->hasOption(DecoderOption::CachedDecoding))
	{
		processor.bufferFrames(m_testDefinition->framesToCheck());
		m_decoder->decodeFramesOutOfOrder();
	}

	for (int frameNumber = 0; frameNumber < m_testDefinition->framesToCheck(); frameNumber++)
	{
		// decodeFrame() returning null means the stream produced fewer frames
		// than the test expects to verify.
		const DecodedFrame* decodedFrame = processor.decodeFrame();
		TCU_CHECK_MSG(decodedFrame, "Decoder did not produce the expected amount of frames");
		// When DPB and output images coincide, the frame must be read back from
		// the DPB layout; otherwise from the separate decode-destination layout.
		auto resultImage = getDecodedImage(m_deviceContext, m_decoder->dpbAndOutputCoincide() ? VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR : VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR, decodedFrame);

#if FRAME_DUMP_DEBUG
		auto bytes = semiplanarToYV12(*resultImage);
		fwrite(bytes.data(), 1, bytes.size(), output);
#endif
		std::string checksum = checksumForClipFrame(m_testDefinition->getClipInfo(), frameNumber);
		if (imageMatchesReferenceChecksum(*resultImage, checksum))
		{
			correctFrames.push_back(frameNumber);
		}
		else
		{
			incorrectFrames.push_back(frameNumber);
		}
	}

#if FRAME_DUMP_DEBUG
	fclose(output);
#endif
	if (!correctFrames.empty() && correctFrames.size() == m_testDefinition->framesToCheck())
		return tcu::TestStatus::pass(de::toString(m_testDefinition->framesToCheck()) + " correctly decoded frames");
	else
	{
		// Report whichever list is shorter to keep the failure message compact.
		stringstream ss;
		ss << correctFrames.size() << " out of " << m_testDefinition->framesToCheck() << " frames rendered correctly (";
		if (correctFrames.size() < incorrectFrames.size())
		{
			ss << "correct frames: ";
			for (int i : correctFrames)
				ss << i << " ";
		}
		else
		{
			ss << "incorrect frames: ";
			for (int i : incorrectFrames)
				ss << i << " ";
		}
		// Backspace erases the trailing separator space before the ')'.
		ss << "\b)";
		return tcu::TestStatus::fail(ss.str());
	}
}
827
// Decode multiple streams with their command-buffer recording and queue
// submissions interleaved on a shared decode queue, then verify every frame of
// every stream against its clip's reference checksums. All streams must have
// cached the same number of decode operations.
tcu::TestStatus InterleavingDecodeTestInstance::iterate(void)
{
	DE_ASSERT(m_testDefinitions.size() == m_decoders.size());
	DE_ASSERT(m_decoders.size() > 1);

	// One frame processor per (test definition, decoder) pair.
	std::vector<MovePtr<FrameProcessor>> processors;
	for (int i = 0; i < m_testDefinitions.size(); i++)
	{
		processors.push_back(MovePtr<FrameProcessor>(new FrameProcessor(&m_deviceContext, m_testDefinitions[i].get(), m_decoders[i].get(), m_context.getTestContext().getLog())));
	}

#if FRAME_DUMP_DEBUG
	// Debug aid only: dump decoded frames (converted to YV12) to disk.
#ifdef _WIN32
	FILE* output = fopen("C:\\output.yuv", "wb");
#else
	FILE* output = fopen("/tmp/output.yuv", "wb");
#endif
#endif

	// First cache up all the decoded frames from the various decode sessions
	for (int i = 0; i < m_testDefinitions.size(); i++)
	{
		const auto& test = m_testDefinitions[i];
		auto& processor = processors[i];
		processor->bufferFrames(test->framesToCheck());
		DE_ASSERT(processor->getBufferedDisplayCount() == test->framesToCheck());
	}

	// Interleaving requires every decoder to have cached the same number of
	// operations and to target the same decode queue.
	auto interleaveCacheSize = m_decoders[0]->m_cachedDecodeParams.size();
	auto firstStreamDecodeQueue = m_decoders[0]->m_deviceContext->decodeQueue;

	size_t totalFrames = 0;
	for (auto& decoder : m_decoders)
	{
		DE_ASSERT(decoder->m_cachedDecodeParams.size() == interleaveCacheSize);
		DE_ASSERT(decoder->m_deviceContext->decodeQueue == firstStreamDecodeQueue);
		totalFrames += decoder->m_cachedDecodeParams.size();
	}

	// Only read by the asserts above; silence unused-variable in release builds.
	DE_UNREF(firstStreamDecodeQueue);

	// Interleave command buffer recording: round-robin across decoders for each
	// cached decode operation index.
	for (int i = 0; i < interleaveCacheSize; i++)
	{
		for (auto& decoder : m_decoders)
		{
			decoder->WaitForFrameFences(decoder->m_cachedDecodeParams[i]);
			decoder->ApplyPictureParameters(decoder->m_cachedDecodeParams[i]);
			decoder->RecordCommandBuffer(decoder->m_cachedDecodeParams[i]);
		}
	}

	// Interleave submissions, again round-robin, optionally checking decode
	// status queries per stream.
	for (int i = 0; i < interleaveCacheSize; i++)
	{
		for (int decoderIdx = 0; decoderIdx < m_decoders.size(); decoderIdx++)
		{
			auto& decoder = m_decoders[decoderIdx];
			auto& test = m_testDefinitions[decoderIdx];
			decoder->SubmitQueue(decoder->m_cachedDecodeParams[i]);
			if (test->hasOption(DecoderOption::UseStatusQueries))
			{
				decoder->QueryDecodeResults(decoder->m_cachedDecodeParams[i]);
			}
		}
	}

	// Per-stream verification results.
	struct InterleavedDecodeResults
	{
		std::vector<int> correctFrames;
		std::vector<int> incorrectFrames;
	};
	std::vector<InterleavedDecodeResults> results(m_testDefinitions.size());

	for (int i = 0; i < m_testDefinitions.size(); i++)
	{
		auto& test = m_testDefinitions[i];
		auto& decoder = m_decoders[i];
		auto& processor = processors[i];
		for (int frameNumber = 0; frameNumber < m_testDefinitions[i]->framesToCheck(); frameNumber++)
		{
			const DecodedFrame* frame = processor->decodeFrame();
			// Read back from the DPB layout when DPB and output images coincide,
			// otherwise from the separate decode-destination layout.
			auto resultImage = getDecodedImage(m_deviceContext, decoder->dpbAndOutputCoincide() ? VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR : VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR, frame);
#if FRAME_DUMP_DEBUG
			auto bytes = semiplanarToYV12(*resultImage);
			fwrite(bytes.data(), 1, bytes.size(), output);
#endif
			auto checksum = checksumForClipFrame(test->getClipInfo(), frameNumber);
			if (imageMatchesReferenceChecksum(*resultImage, checksum))
			{
				results[i].correctFrames.push_back(frameNumber);
			}
			else
			{
				results[i].incorrectFrames.push_back(frameNumber);
			}
		}
	}

#if FRAME_DUMP_DEBUG
	fclose(output);
#endif

	// Pass only if no stream had an incorrect frame; sanity-check that every
	// cached frame was verified.
	bool allTestsPassed = true;
	int totalFramesCheck = 0;
	for (const auto& res : results)
	{
		if (!res.incorrectFrames.empty())
			allTestsPassed = false;
		totalFramesCheck += (res.correctFrames.size() + res.incorrectFrames.size());
	}
	DE_ASSERT(totalFramesCheck == totalFrames);

	if (allTestsPassed)
		return tcu::TestStatus::pass(de::toString(totalFrames) + " correctly decoded frames");
	else
	{
		stringstream ss;
		ss << "Interleaving failure: ";
		for (int i = 0; i < results.size(); i++)
		{
			const auto& result = results[i];
			if (!result.incorrectFrames.empty())
			{
				ss << " (stream #" << i << " incorrect frames: ";
				for (int frame : result.incorrectFrames)
					ss << frame << " ";
				// Backspace erases the trailing separator space before the ')'.
				ss << "\b)";
			}
		}
		return tcu::TestStatus::fail(ss.str());
	}
}
961
962 #endif // #ifdef DE_BUILD_VIDEO
963
// Test case wrapping a single-stream video decode test. Owns the test
// definition and creates a VideoDecodeTestInstance for execution.
class VideoDecodeTestCase : public vkt::TestCase
{
public:
	VideoDecodeTestCase(tcu::TestContext& context, const char* name, MovePtr<TestDefinition> testDefinition)
		: vkt::TestCase(context, name), m_testDefinition(testDefinition)
	{
	}

	TestInstance* createInstance(Context& context) const override;
	void checkSupport(Context& context) const override;

private:
	// Owning pointer to the clip/codec/options description for this case.
	MovePtr<TestDefinition> m_testDefinition;
};
978
// Test case wrapping several decode streams whose recording and submission are
// interleaved on one queue. Owns one test definition per stream.
class InterleavingDecodeTestCase : public vkt::TestCase
{
public:
	InterleavingDecodeTestCase(tcu::TestContext& context, const char* name, std::vector<MovePtr<TestDefinition>>&& testDefinitions)
		: vkt::TestCase(context, name), m_testDefinitions(std::move(testDefinitions))
	{
	}

	TestInstance* createInstance(Context& context) const override
	{
#ifdef DE_BUILD_VIDEO
		return new InterleavingDecodeTestInstance(context, m_testDefinitions);
#endif
		// Without video support compiled in, no instance can be created.
		DE_UNREF(context);
		return nullptr;
	}
	void checkSupport(Context& context) const override;

private:
	// One definition per interleaved stream.
	std::vector<MovePtr<TestDefinition>> m_testDefinitions;
};
1000
createInstance(Context& context) const1001 TestInstance* VideoDecodeTestCase::createInstance(Context& context) const
1002 {
1003 #ifdef DE_BUILD_VIDEO
1004 return new VideoDecodeTestInstance(context, m_testDefinition.get());
1005 #endif
1006
1007 #ifndef DE_BUILD_VIDEO
1008 DE_UNREF(context);
1009 return nullptr;
1010 #endif
1011 }
1012
checkSupport(Context& context) const1013 void VideoDecodeTestCase::checkSupport(Context& context) const
1014 {
1015 context.requireDeviceFunctionality("VK_KHR_video_queue");
1016 context.requireDeviceFunctionality("VK_KHR_synchronization2");
1017
1018 switch (m_testDefinition->getTestType())
1019 {
1020 case TEST_TYPE_H264_DECODE_I:
1021 case TEST_TYPE_H264_DECODE_I_P:
1022 case TEST_TYPE_H264_DECODE_CLIP_A:
1023 case TEST_TYPE_H264_DECODE_I_P_NOT_MATCHING_ORDER:
1024 case TEST_TYPE_H264_DECODE_I_P_B_13:
1025 case TEST_TYPE_H264_DECODE_I_P_B_13_NOT_MATCHING_ORDER:
1026 case TEST_TYPE_H264_DECODE_QUERY_RESULT_WITH_STATUS:
1027 case TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE:
1028 case TEST_TYPE_H264_DECODE_RESOLUTION_CHANGE_DPB:
1029 {
1030 context.requireDeviceFunctionality("VK_KHR_video_decode_h264");
1031 break;
1032 }
1033 case TEST_TYPE_H265_DECODE_I:
1034 case TEST_TYPE_H265_DECODE_I_P:
1035 case TEST_TYPE_H265_DECODE_CLIP_D:
1036 case TEST_TYPE_H265_DECODE_I_P_NOT_MATCHING_ORDER:
1037 case TEST_TYPE_H265_DECODE_I_P_B_13:
1038 case TEST_TYPE_H265_DECODE_I_P_B_13_NOT_MATCHING_ORDER:
1039 {
1040 context.requireDeviceFunctionality("VK_KHR_video_decode_h265");
1041 break;
1042 }
1043 default:
1044 TCU_THROW(InternalError, "Unknown TestType");
1045 }
1046 }
1047
checkSupport(Context& context) const1048 void InterleavingDecodeTestCase::checkSupport(Context& context) const
1049 {
1050 context.requireDeviceFunctionality("VK_KHR_video_queue");
1051 context.requireDeviceFunctionality("VK_KHR_synchronization2");
1052
1053 #ifdef DE_DEBUG
1054 DE_ASSERT(!m_testDefinitions.empty());
1055 TestType firstType = m_testDefinitions[0]->getTestType();
1056 for (const auto& test : m_testDefinitions)
1057 DE_ASSERT(test->getTestType() == firstType);
1058 #endif
1059 switch (m_testDefinitions[0]->getTestType())
1060 {
1061 case TEST_TYPE_H264_DECODE_INTERLEAVED:
1062 {
1063 context.requireDeviceFunctionality("VK_KHR_video_decode_h264");
1064 break;
1065 }
1066 case TEST_TYPE_H264_H265_DECODE_INTERLEAVED:
1067 {
1068 context.requireDeviceFunctionality("VK_KHR_video_decode_h264");
1069 context.requireDeviceFunctionality("VK_KHR_video_decode_h265");
1070 break;
1071 }
1072 default:
1073 TCU_THROW(InternalError, "Unknown interleaving test type");
1074 }
1075 }
1076
1077 } // namespace
1078
createVideoDecodeTests(tcu::TestContext& testCtx)1079 tcu::TestCaseGroup* createVideoDecodeTests(tcu::TestContext& testCtx)
1080 {
1081 const deUint32 baseSeed = static_cast<deUint32>(testCtx.getCommandLine().getBaseSeed());
1082 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "decode"));
1083
1084 for (const auto& decodeTest : g_DecodeTests)
1085 {
1086 auto defn = TestDefinition::create(decodeTest, baseSeed);
1087
1088 const char* testName = getTestName(defn->getTestType());
1089 deUint32 rngSeed = baseSeed ^ deStringHash(testName);
1090 defn->updateHash(rngSeed);
1091 group->addChild(new VideoDecodeTestCase(testCtx, testName, defn));
1092 }
1093
1094 for (const auto& interleavingTest : g_InterleavingTests)
1095 {
1096 const char* testName = getTestName(interleavingTest.type);
1097 std::vector<MovePtr<TestDefinition>> defns;
1098 DecodeTestParam streamA{interleavingTest.type, interleavingTest.streamA};
1099 defns.push_back(TestDefinition::create(streamA, baseSeed));
1100 DecodeTestParam streamB{interleavingTest.type, interleavingTest.streamB};
1101 defns.push_back(TestDefinition::create(streamB, baseSeed));
1102 group->addChild(new InterleavingDecodeTestCase(testCtx, testName, std::move(defns)));
1103 }
1104
1105 return group.release();
1106 }
1107
1108 } // namespace video
1109 } // namespace vkt
1110