/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Common built-in function tests.
 *//*--------------------------------------------------------------------*/

#include "vktShaderCommonFunctionTests.hpp"
#include "vktShaderExecutor.hpp"
#include "vkQueryUtil.hpp"
#include "gluContextInfo.hpp"
#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuFloat.hpp"
#include "tcuInterval.hpp"
#include "tcuFloatFormat.hpp"
#include "tcuVectorUtil.hpp"
#include "deRandom.hpp"
#include "deMath.h"
#include "deString.h"
#include "deArrayUtil.hpp"
#include "deSharedPtr.hpp"

// Note: the original system include target was lost in extraction; <algorithm> is
// restored here since std::fill and std::min are used below.
#include <algorithm>

namespace vkt
{
namespace shaderexecutor
{

using std::vector;
using std::string;
using tcu::TestLog;

using tcu::Vec2;
using tcu::Vec3;
using tcu::Vec4;
using tcu::IVec2;
using tcu::IVec3;
using tcu::IVec4;

namespace
{

// Utilities

template<typename T, int Size>
struct VecArrayAccess
{
public:
    VecArrayAccess (const void* ptr) : m_array((tcu::Vector<T, Size>*)ptr) {}
    ~VecArrayAccess (void) {}

    const tcu::Vector<T, Size>& operator[] (size_t offset) const { return m_array[offset]; }
    tcu::Vector<T, Size>&       operator[] (size_t offset)       { return m_array[offset]; }

private:
    tcu::Vector<T, Size>* m_array;
};

template<typename T, int Size>
static void fillRandomVectors (de::Random& rnd, const tcu::Vector<T, Size>& minValue, const tcu::Vector<T, Size>& maxValue, void* dst, int numValues, int offset = 0)
{
    VecArrayAccess<T, Size> access(dst);
    for (int ndx = 0; ndx < numValues; ndx++)
        access[offset + ndx] = tcu::randomVector<T, Size>(rnd, minValue, maxValue);
}

template<typename T>
static void fillRandomScalars (de::Random& rnd, T minValue, T maxValue, void* dst, int numValues, int offset = 0)
{
    T* typedPtr = (T*)dst;
    for (int ndx = 0; ndx < numValues; ndx++)
        typedPtr[offset + ndx] = de::randomScalar<T>(rnd, minValue, maxValue);
}

// Distance between two floats measured in ULPs, i.e. difference of their bit patterns.
inline deUint32 getUlpDiff (float a, float b)
{
    const deUint32 aBits = tcu::Float32(a).bits();
    const deUint32 bBits = tcu::Float32(b).bits();
    return aBits > bBits ? aBits - bBits : bBits - aBits;
}

inline deUint32 getUlpDiffIgnoreZeroSign (float a, float b)
{
    if (tcu::Float32(a).isZero())
        return getUlpDiff(tcu::Float32::construct(tcu::Float32(b).sign(), 0, 0).asFloat(), b);
    else if (tcu::Float32(b).isZero())
        return getUlpDiff(a, tcu::Float32::construct(tcu::Float32(a).sign(), 0, 0).asFloat());
    else
        return getUlpDiff(a, b);
}

inline deUint64 getMaxUlpDiffFromBits (int numAccurateBits, int numTotalBits)
{
    const int      numGarbageBits = numTotalBits - numAccurateBits;
    const deUint64 mask           = (1ull << numGarbageBits) - 1ull;

    return mask;
}

static const char* getPrecisionPostfix (glu::Precision precision)
{
    static const char* const s_postfix[] =
    {
        "_lowp",
        "_mediump",
        "_highp"
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(s_postfix) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(s_postfix)));
    return s_postfix[precision];
}

static int getNumMantissaBits (glu::DataType type)
{
    DE_ASSERT(glu::isDataTypeFloatOrVec(type) || glu::isDataTypeDoubleOrDVec(type));
    return (glu::isDataTypeFloatOrVec(type) ? static_cast<int>(tcu::Float32::MANTISSA_BITS)
                                            : static_cast<int>(tcu::Float64::MANTISSA_BITS));
}

// Minimum number of accurate mantissa bits guaranteed for each precision qualifier.
// Doubles have no relaxed-precision variants and are always exact.
static int getMinMantissaBits (glu::DataType type, glu::Precision precision)
{
    if (glu::isDataTypeDoubleOrDVec(type))
        return static_cast<int>(tcu::Float64::MANTISSA_BITS);

    const int bits[] =
    {
        7,  // lowp
        10, // mediump
        23  // highp
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(bits) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(bits)));
    return bits[precision];
}
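// Illustrative note on the ULP helpers above: getMaxUlpDiffFromBits() returns a mask
// covering the mantissa bits that are *not* guaranteed to be accurate. For example, a
// mediump float only guarantees 10 of the 23 float32 mantissa bits, so
// getMaxUlpDiffFromBits(10, 23) == (1u << 13) - 1 == 0x1fff, while for highp
// (all 23 bits accurate) the result is 0 and comparisons must be bit-exact.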
static int getExponentBits (glu::DataType type)
{
    DE_ASSERT(glu::isDataTypeFloatOrVec(type) || glu::isDataTypeDoubleOrDVec(type));
    return (glu::isDataTypeFloatOrVec(type) ? static_cast<int>(tcu::Float32::EXPONENT_BITS)
                                            : static_cast<int>(tcu::Float64::EXPONENT_BITS));
}

static deUint32 getExponentMask (int exponentBits)
{
    DE_ASSERT(exponentBits > 0);
    return ((1u << exponentBits) - 1u);
}

// Size of a single component in the input/output buffers. All scalar types used by
// these tests are 32-bit except doubles.
static int getComponentByteSize (glu::DataType type)
{
    const glu::DataType scalarType = glu::getDataTypeScalarType(type);
    DE_ASSERT(scalarType == glu::TYPE_FLOAT || scalarType == glu::TYPE_DOUBLE ||
              scalarType == glu::TYPE_INT   || scalarType == glu::TYPE_UINT   ||
              scalarType == glu::TYPE_BOOL);
    return (scalarType == glu::TYPE_DOUBLE ? 8 : 4);
}

static vector<int> getScalarSizes (const vector<Symbol>& symbols)
{
    vector<int> sizes(symbols.size());
    for (int ndx = 0; ndx < (int)symbols.size(); ++ndx)
        sizes[ndx] = symbols[ndx].varType.getScalarSize();
    return sizes;
}

static vector<int> getComponentByteSizes (const vector<Symbol>& symbols)
{
    vector<int> sizes;
    sizes.reserve(symbols.size());
    for (const auto& sym : symbols)
        sizes.push_back(getComponentByteSize(sym.varType.getBasicType()));
    return sizes;
}

static int computeTotalByteSize (const vector<Symbol>& symbols)
{
    int totalSize = 0;
    for (const auto& sym : symbols)
        totalSize += getComponentByteSize(sym.varType.getBasicType()) * sym.varType.getScalarSize();
    return totalSize;
}

static vector<void*> getInputOutputPointers (const vector<Symbol>& symbols, vector<deUint8>& data, const int numValues)
{
    vector<void*> pointers        (symbols.size());
    int           curScalarOffset = 0;

    for (int varNdx = 0; varNdx < (int)symbols.size(); ++varNdx)
    {
        const Symbol& var            = symbols[varNdx];
        const int     scalarSize     = var.varType.getScalarSize();
        const auto    componentBytes = getComponentByteSize(var.varType.getBasicType());

        // Uses planar layout as input/output specs do not support strides.
        pointers[varNdx] = &data[curScalarOffset];
        curScalarOffset += scalarSize*numValues*componentBytes;
    }

    DE_ASSERT(curScalarOffset == (int)data.size());
    return pointers;
}

void checkTypeSupport (Context& context, glu::DataType dataType)
{
    if (glu::isDataTypeDoubleOrDVec(dataType))
    {
        const auto& vki            = context.getInstanceInterface();
        const auto  physicalDevice = context.getPhysicalDevice();
        const auto  features       = vk::getPhysicalDeviceFeatures(vki, physicalDevice);

        if (!features.shaderFloat64)
            TCU_THROW(NotSupportedError, "64-bit floats not supported by the implementation");
    }
}

// \todo [2013-08-08 pyry] Make generic utility and move to glu?

struct HexFloat
{
    const float value;
    HexFloat (const float value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexFloat& v)
{
    return str << v.value << " / " << tcu::toHex(tcu::Float32(v.value).bits());
}

struct HexDouble
{
    const double value;
    HexDouble (const double value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexDouble& v)
{
    return str << v.value << " / " << tcu::toHex(tcu::Float64(v.value).bits());
}

struct HexBool
{
    const deUint32 value;
    HexBool (const deUint32 value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexBool& v)
{
    return str << (v.value ? "true" : "false") << " / " << tcu::toHex(v.value);
}
"true" : "false") << " / " << tcu::toHex(v.value); } struct VarValue { const glu::VarType& type; const void* value; VarValue (const glu::VarType& type_, const void* value_) : type(type_), value(value_) {} }; std::ostream& operator<< (std::ostream& str, const VarValue& varValue) { DE_ASSERT(varValue.type.isBasicType()); const glu::DataType basicType = varValue.type.getBasicType(); const glu::DataType scalarType = glu::getDataTypeScalarType(basicType); const int numComponents = glu::getDataTypeScalarSize(basicType); if (numComponents > 1) str << glu::getDataTypeName(basicType) << "("; for (int compNdx = 0; compNdx < numComponents; compNdx++) { if (compNdx != 0) str << ", "; switch (scalarType) { case glu::TYPE_FLOAT: str << HexFloat(((const float*)varValue.value)[compNdx]); break; case glu::TYPE_INT: str << ((const deInt32*)varValue.value)[compNdx]; break; case glu::TYPE_UINT: str << tcu::toHex(((const deUint32*)varValue.value)[compNdx]); break; case glu::TYPE_BOOL: str << HexBool(((const deUint32*)varValue.value)[compNdx]); break; case glu::TYPE_DOUBLE: str << HexDouble(((const double*)varValue.value)[compNdx]); break; default: DE_ASSERT(false); } } if (numComponents > 1) str << ")"; return str; } static std::string getCommonFuncCaseName (glu::DataType baseType, glu::Precision precision) { const bool isDouble = glu::isDataTypeDoubleOrDVec(baseType); return string(glu::getDataTypeName(baseType)) + (isDouble ? "" : getPrecisionPostfix(precision)) + "_compute"; } template static void addFunctionCases (tcu::TestCaseGroup* parent, const char* functionName, const std::vector& scalarTypes) { tcu::TestCaseGroup* group = new tcu::TestCaseGroup(parent->getTestContext(), functionName, functionName); parent->addChild(group); for (const auto scalarType : scalarTypes) { const bool isDouble = glu::isDataTypeDoubleOrDVec(scalarType); const int lowestPrec = (isDouble ? glu::PRECISION_LAST : glu::PRECISION_MEDIUMP); const int highestPrec = (isDouble ? 
template<class TestClass>
static void addFunctionCases (tcu::TestCaseGroup* parent, const char* functionName, const std::vector<glu::DataType>& scalarTypes)
{
    tcu::TestCaseGroup* group = new tcu::TestCaseGroup(parent->getTestContext(), functionName, functionName);
    parent->addChild(group);

    for (const auto scalarType : scalarTypes)
    {
        const bool isDouble    = glu::isDataTypeDoubleOrDVec(scalarType);
        const int  lowestPrec  = (isDouble ? glu::PRECISION_LAST : glu::PRECISION_MEDIUMP);
        const int  highestPrec = (isDouble ? glu::PRECISION_LAST : glu::PRECISION_HIGHP);

        for (int vecSize = 1; vecSize <= 4; vecSize++)
        {
            for (int prec = lowestPrec; prec <= highestPrec; prec++)
            {
                group->addChild(new TestClass(parent->getTestContext(), glu::DataType(scalarType + vecSize - 1), glu::Precision(prec)));
            }
        }
    }
}

// CommonFunctionCase

class CommonFunctionCase : public TestCase
{
public:
                          CommonFunctionCase  (tcu::TestContext& testCtx, const char* name, const char* description);
                          ~CommonFunctionCase (void);

    virtual void          initPrograms        (vk::SourceCollections& programCollection) const
    {
        generateSources(glu::SHADERTYPE_COMPUTE, m_spec, programCollection);
    }

    virtual TestInstance* createInstance      (Context& context) const = 0;

protected:
                          CommonFunctionCase  (const CommonFunctionCase&);
    CommonFunctionCase&   operator=           (const CommonFunctionCase&);

    ShaderSpec            m_spec;
    const int             m_numValues;
};

CommonFunctionCase::CommonFunctionCase (tcu::TestContext& testCtx, const char* name, const char* description)
    : TestCase    (testCtx, name, description)
    , m_numValues (100)
{
}

CommonFunctionCase::~CommonFunctionCase (void)
{
}

// CommonFunctionTestInstance

class CommonFunctionTestInstance : public TestInstance
{
public:
    CommonFunctionTestInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : TestInstance (context)
        , m_spec       (spec)
        , m_numValues  (numValues)
        , m_name       (name)
        , m_executor   (createExecutor(context, glu::SHADERTYPE_COMPUTE, spec))
    {
    }

    virtual tcu::TestStatus iterate (void);

protected:
    virtual void getInputValues (int numValues, void* const* values) const = 0;
    virtual bool compare        (const void* const* inputs, const void* const* outputs) = 0;

    const ShaderSpec              m_spec;
    const int                     m_numValues;

    // \todo [2017-03-07 pyry] Hack used to generate seeds for test cases - get rid of this.
    const char*                   m_name;

    std::ostringstream            m_failMsg;   //!< Comparison failure help message.
    de::UniquePtr<ShaderExecutor> m_executor;
};

tcu::TestStatus CommonFunctionTestInstance::iterate (void)
{
    const int           numInputBytes  = computeTotalByteSize(m_spec.inputs);
    const int           numOutputBytes = computeTotalByteSize(m_spec.outputs);
    vector<deUint8>     inputData      (numInputBytes * m_numValues);
    vector<deUint8>     outputData     (numOutputBytes * m_numValues);
    const vector<void*> inputPointers  = getInputOutputPointers(m_spec.inputs, inputData, m_numValues);
    const vector<void*> outputPointers = getInputOutputPointers(m_spec.outputs, outputData, m_numValues);

    // Initialize input data.
    getInputValues(m_numValues, &inputPointers[0]);

    // Execute shader.
    m_executor->execute(m_numValues, &inputPointers[0], &outputPointers[0]);

    // Compare results.
    {
        const vector<int> inScalarSizes    = getScalarSizes(m_spec.inputs);
        const vector<int> outScalarSizes   = getScalarSizes(m_spec.outputs);
        const vector<int> inCompByteSizes  = getComponentByteSizes(m_spec.inputs);
        const vector<int> outCompByteSizes = getComponentByteSizes(m_spec.outputs);
        vector<void*>     curInputPtr      (inputPointers.size());
        vector<void*>     curOutputPtr     (outputPointers.size());
        int               numFailed        = 0;
        tcu::TestContext& testCtx          = m_context.getTestContext();

        for (int valNdx = 0; valNdx < m_numValues; valNdx++)
        {
            // Set up pointers for comparison.
            for (int inNdx = 0; inNdx < (int)curInputPtr.size(); ++inNdx)
                curInputPtr[inNdx] = (deUint8*)inputPointers[inNdx] + inScalarSizes[inNdx]*inCompByteSizes[inNdx]*valNdx;

            for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); ++outNdx)
                curOutputPtr[outNdx] = (deUint8*)outputPointers[outNdx] + outScalarSizes[outNdx]*outCompByteSizes[outNdx]*valNdx;

            if (!compare(&curInputPtr[0], &curOutputPtr[0]))
            {
                // \todo [2013-08-08 pyry] We probably want to log reference value as well?
                testCtx.getLog() << TestLog::Message << "ERROR: comparison failed for value " << valNdx << ":\n  " << m_failMsg.str() << TestLog::EndMessage;

                testCtx.getLog() << TestLog::Message << "  inputs:" << TestLog::EndMessage;
                for (int inNdx = 0; inNdx < (int)curInputPtr.size(); inNdx++)
                    testCtx.getLog() << TestLog::Message << "    " << m_spec.inputs[inNdx].name << " = "
                                     << VarValue(m_spec.inputs[inNdx].varType, curInputPtr[inNdx])
                                     << TestLog::EndMessage;

                testCtx.getLog() << TestLog::Message << "  outputs:" << TestLog::EndMessage;
                for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); outNdx++)
                    testCtx.getLog() << TestLog::Message << "    " << m_spec.outputs[outNdx].name << " = "
                                     << VarValue(m_spec.outputs[outNdx].varType, curOutputPtr[outNdx])
                                     << TestLog::EndMessage;

                m_failMsg.str("");
                m_failMsg.clear();
                numFailed += 1;
            }
        }

        testCtx.getLog() << TestLog::Message << (m_numValues - numFailed) << " / " << m_numValues << " values passed" << TestLog::EndMessage;

        if (numFailed == 0)
            return tcu::TestStatus::pass("Pass");
        else
            return tcu::TestStatus::fail("Result comparison failed");
    }
}

// Test cases

class AbsCaseInstance : public CommonFunctionTestInstance
{
public:
    AbsCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : CommonFunctionTestInstance (context, spec, numValues, name)
    {
    }

    void getInputValues (int numValues, void* const* values) const
    {
        // Note: the highp range avoids INT_MIN, whose absolute value is not representable in a 32-bit int.
        const IVec2 intRanges[] =
        {
            IVec2(-(1<<7)+1,  (1<<7)-1),
            IVec2(-(1<<15)+1, (1<<15)-1),
            IVec2(0x80000001, 0x7fffffff)
        };

        de::Random           rnd        (deStringHash(m_name) ^ 0x235facu);
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        DE_ASSERT(!glu::isDataTypeFloatOrVec(type));

        fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), values[0], numValues*scalarSize);
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType type       = m_spec.inputs[0].varType.getBasicType();
        const int           scalarSize = glu::getDataTypeScalarSize(type);

        DE_ASSERT(!glu::isDataTypeFloatOrVec(type));

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const int in0  = ((const int*)inputs[0])[compNdx];
            const int out0 = ((const int*)outputs[0])[compNdx];
            const int ref0 = de::abs(in0);

            if (out0 != ref0)
            {
                m_failMsg << "Expected [" << compNdx << "] = " << ref0;
                return false;
            }
        }

        return true;
    }
};

class AbsCase : public CommonFunctionCase
{
public:
    AbsCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision)
        : CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, precision).c_str(), "abs")
    {
        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
        m_spec.source = "out0 = abs(in0);";
    }

    TestInstance* createInstance (Context& ctx) const
    {
        return new AbsCaseInstance(ctx, m_spec, m_numValues, getName());
    }
};
class SignCaseInstance : public CommonFunctionTestInstance
{
public:
    SignCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : CommonFunctionTestInstance (context, spec, numValues, name)
    {
    }

    void getInputValues (int numValues, void* const* values) const
    {
        const IVec2 intRanges[] =
        {
            IVec2(-(1<<7),  (1<<7)-1),
            IVec2(-(1<<15), (1<<15)-1),
            IVec2(0x80000000, 0x7fffffff)
        };

        de::Random           rnd        (deStringHash(m_name) ^ 0x324u);
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        DE_ASSERT(!glu::isDataTypeFloatOrVec(type));

        // Special cases first: all +1, all -1 and all 0, followed by random values.
        std::fill((int*)values[0] + scalarSize*0, (int*)values[0] + scalarSize*1, +1);
        std::fill((int*)values[0] + scalarSize*1, (int*)values[0] + scalarSize*2, -1);
        std::fill((int*)values[0] + scalarSize*2, (int*)values[0] + scalarSize*3,  0);

        fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), (int*)values[0] + scalarSize*3, (numValues-3)*scalarSize);
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType type       = m_spec.inputs[0].varType.getBasicType();
        const int           scalarSize = glu::getDataTypeScalarSize(type);

        DE_ASSERT(!glu::isDataTypeFloatOrVec(type));

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const int in0  = ((const int*)inputs[0])[compNdx];
            const int out0 = ((const int*)outputs[0])[compNdx];
            const int ref0 = in0 < 0 ? -1 : in0 > 0 ? +1 : 0;

            if (out0 != ref0)
            {
                m_failMsg << "Expected [" << compNdx << "] = " << ref0;
                return false;
            }
        }

        return true;
    }
};

class SignCase : public CommonFunctionCase
{
public:
    SignCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision)
        : CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, precision).c_str(), "sign")
    {
        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
        m_spec.source = "out0 = sign(in0);";
    }

    TestInstance* createInstance (Context& ctx) const
    {
        return new SignCaseInstance(ctx, m_spec, m_numValues, getName());
    }
};

static void infNanRandomFloats (int numValues, void* const* values, const char* name, const ShaderSpec& spec)
{
    constexpr deUint64   kOne            = 1;
    de::Random           rnd             (deStringHash(name) ^ 0xc2a39fu);
    const glu::DataType  type            = spec.inputs[0].varType.getBasicType();
    const glu::Precision precision       = spec.inputs[0].varType.getPrecision();
    const int            scalarSize      = glu::getDataTypeScalarSize(type);
    const int            minMantissaBits = getMinMantissaBits(type, precision);
    const int            numMantissaBits = getNumMantissaBits(type);
    const deUint64       mantissaMask    = ~getMaxUlpDiffFromBits(minMantissaBits, numMantissaBits) & ((kOne<<numMantissaBits)-kOne);
    const int            exponentBits    = getExponentBits(type);
    const deUint32       exponentMask    = getExponentMask(exponentBits);
    const bool           isDouble        = glu::isDataTypeDoubleOrDVec(type);
    const deUint64       exponentBias    = (isDouble ? static_cast<deUint64>(tcu::Float64::EXPONENT_BIAS)
                                                     : static_cast<deUint64>(tcu::Float32::EXPONENT_BIAS));

    int numInf = 0;
    int numNan = 0;

    for (int valNdx = 0; valNdx < numValues*scalarSize; valNdx++)
    {
        // Roughly 25% chance of each of Inf and NaN.
        const bool     isInf    = rnd.getFloat() > 0.75f;
        const bool     isNan    = !isInf && rnd.getFloat() > 0.66f;
        const deUint64 m        = rnd.getUint64() & mantissaMask;
        const deUint64 e        = static_cast<deUint64>(rnd.getUint32() & exponentMask);
        const deUint64 sign     = static_cast<deUint64>(rnd.getUint32() & 0x1u);
        // Ensure the 'quiet' bit is set on NaNs (also ensures we don't generate Inf by mistake).
        const deUint64 mantissa = isInf ? 0 : (isNan ? ((kOne<<(numMantissaBits-1)) | m) : m);
        const deUint64 exp      = (isNan || isInf) ? exponentMask : std::min(e, exponentBias);
        const deUint64 value    = (sign << (numMantissaBits + exponentBits)) | (exp << numMantissaBits) | mantissa;

        if (isInf) numInf++;
        if (isNan) numNan++;

        if (isDouble)
        {
            DE_ASSERT(tcu::Float64(value).isInf() == isInf && tcu::Float64(value).isNaN() == isNan);
            ((deUint64*)values[0])[valNdx] = value;
        }
        else
        {
            const auto value32 = static_cast<deUint32>(value);
            DE_ASSERT(tcu::Float32(value32).isInf() == isInf && tcu::Float32(value32).isNaN() == isNan);
            ((deUint32*)values[0])[valNdx] = value32;
        }
    }
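    // Illustrative note: the bit pattern assembled above follows the standard IEEE-754
    // layout | sign | biased exponent | mantissa |. An all-ones exponent encodes Inf
    // (zero mantissa) or NaN (non-zero mantissa); for ordinary values the exponent is
    // clamped to the bias so the generated value stays finite.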
    // Check for minimal coverage of intended cases.
    DE_ASSERT(0 < numInf);
    DE_ASSERT(0 < numNan);
    DE_ASSERT(numInf + numNan < numValues*scalarSize);
}

class IsnanCaseInstance : public CommonFunctionTestInstance
{
public:
    IsnanCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : CommonFunctionTestInstance (context, spec, numValues, name)
    {
    }

    void getInputValues (int numValues, void* const* values) const
    {
        infNanRandomFloats(numValues, values, m_name, m_spec);
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);
        const bool           isDouble   = glu::isDataTypeDoubleOrDVec(type);

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const bool out0 = reinterpret_cast<const deUint32*>(outputs[0])[compNdx] != 0;
            bool       ok;
            bool       ref;

            if (isDouble)
            {
                const double in0 = reinterpret_cast<const double*>(inputs[0])[compNdx];
                ref = tcu::Float64(in0).isNaN();
                ok  = (out0 == ref);
            }
            else
            {
                const float in0 = reinterpret_cast<const float*>(inputs[0])[compNdx];
                ref = tcu::Float32(in0).isNaN();

                // NaN support only required for highp. Otherwise just check for false positives.
                if (precision == glu::PRECISION_HIGHP)
                    ok = (out0 == ref);
                else
                    ok = ref || !out0;
            }

            if (!ok)
            {
                m_failMsg << "Expected [" << compNdx << "] = " << (ref ? "true" : "false");
                return false;
            }
        }

        return true;
    }
};
class IsnanCase : public CommonFunctionCase
{
public:
    IsnanCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision)
        : CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, precision).c_str(), "isnan")
    {
        DE_ASSERT(glu::isDataTypeFloatOrVec(baseType) || glu::isDataTypeDoubleOrDVec(baseType));

        const int           vecSize  = glu::getDataTypeScalarSize(baseType);
        const glu::DataType boolType = vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
        m_spec.source = "out0 = isnan(in0);";
    }

    void checkSupport (Context& context) const
    {
        checkTypeSupport(context, m_spec.inputs[0].varType.getBasicType());
    }

    TestInstance* createInstance (Context& ctx) const
    {
        return new IsnanCaseInstance(ctx, m_spec, m_numValues, getName());
    }
};

class IsinfCaseInstance : public CommonFunctionTestInstance
{
public:
    IsinfCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : CommonFunctionTestInstance (context, spec, numValues, name)
    {
    }

    void getInputValues (int numValues, void* const* values) const
    {
        infNanRandomFloats(numValues, values, m_name, m_spec);
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);
        const bool           isDouble   = glu::isDataTypeDoubleOrDVec(type);

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const bool out0 = reinterpret_cast<const deUint32*>(outputs[0])[compNdx] != 0;
            bool       ref;
            bool       ok;

            if (isDouble)
            {
                const double in0 = reinterpret_cast<const double*>(inputs[0])[compNdx];
                ref = tcu::Float64(in0).isInf();
                ok  = (out0 == ref);
            }
            else
            {
                const float in0 = reinterpret_cast<const float*>(inputs[0])[compNdx];
                if (precision == glu::PRECISION_HIGHP)
                {
                    // Only highp is required to support inf/nan.
                    ref = tcu::Float32(in0).isInf();
                    ok  = (out0 == ref);
                }
                else
                {
                    // Inf support is optional, check that inputs that are not Inf in mediump don't result in true.
                    ref = tcu::Float16(in0).isInf();
                    ok  = (out0 || !ref);
                }
            }

            if (!ok)
            {
                m_failMsg << "Expected [" << compNdx << "] = " << HexBool(ref);
                return false;
            }
        }

        return true;
    }
};
class IsinfCase : public CommonFunctionCase
{
public:
    IsinfCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision)
        : CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, precision).c_str(), "isinf")
    {
        DE_ASSERT(glu::isDataTypeFloatOrVec(baseType) || glu::isDataTypeDoubleOrDVec(baseType));

        const int           vecSize  = glu::getDataTypeScalarSize(baseType);
        const glu::DataType boolType = vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
        m_spec.source = "out0 = isinf(in0);";
    }

    void checkSupport (Context& context) const
    {
        checkTypeSupport(context, m_spec.inputs[0].varType.getBasicType());
    }

    TestInstance* createInstance (Context& ctx) const
    {
        return new IsinfCaseInstance(ctx, m_spec, m_numValues, getName());
    }
};

class FloatBitsToUintIntCaseInstance : public CommonFunctionTestInstance
{
public:
    FloatBitsToUintIntCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name)
        : CommonFunctionTestInstance (context, spec, numValues, name)
    {
    }

    void getInputValues (int numValues, void* const* values) const
    {
        const Vec2 ranges[] =
        {
            Vec2(-2.0f, 2.0f),  // lowp
            Vec2(-1e3f, 1e3f),  // mediump
            Vec2(-1e7f, 1e7f)   // highp
        };

        de::Random           rnd        (deStringHash(m_name) ^ 0x2790au);
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), values[0], numValues*scalarSize);
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type            = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision       = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize      = glu::getDataTypeScalarSize(type);
        const int            minMantissaBits = getMinMantissaBits(type, precision);
        const int            numMantissaBits = getNumMantissaBits(type);
        const int            maxUlpDiff      = static_cast<int>(getMaxUlpDiffFromBits(minMantissaBits, numMantissaBits));

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const float    in0     = ((const float*)inputs[0])[compNdx];
            const deUint32 out0    = ((const deUint32*)outputs[0])[compNdx];
            const deUint32 refOut0 = tcu::Float32(in0).bits();
            const int      ulpDiff = de::abs((int)out0 - (int)refOut0);

            if (ulpDiff > maxUlpDiff)
            {
                m_failMsg << "Expected [" << compNdx << "] = " << tcu::toHex(refOut0) << " with threshold "
                          << tcu::toHex(maxUlpDiff) << ", got diff " << tcu::toHex(ulpDiff);
                return false;
            }
        }

        return true;
    }
};
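// Illustrative note: for mediump inputs only 10 of the 23 float32 mantissa bits are
// guaranteed, so the comparison above tolerates up to (1 << 13) - 1 ULPs of difference
// in the reconstructed bit pattern; for highp inputs maxUlpDiff is 0 and the returned
// bits must match the reference exactly.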
"out0 = floatBitsToInt(in0);" : "out0 = floatBitsToUint(in0);"; } TestInstance* createInstance (Context& ctx) const { return new FloatBitsToUintIntCaseInstance(ctx, m_spec, m_numValues, getName()); } }; class FloatBitsToIntCase : public FloatBitsToUintIntCase { public: FloatBitsToIntCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision) : FloatBitsToUintIntCase (testCtx, baseType, precision, true) { } }; class FloatBitsToUintCase : public FloatBitsToUintIntCase { public: FloatBitsToUintCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision) : FloatBitsToUintIntCase (testCtx, baseType, precision, false) { } }; class BitsToFloatCaseInstance : public CommonFunctionTestInstance { public: BitsToFloatCaseInstance (Context& context, const ShaderSpec& spec, int numValues, const char* name) : CommonFunctionTestInstance (context, spec, numValues, name) { } void getInputValues (int numValues, void* const* values) const { de::Random rnd (deStringHash(m_name) ^ 0xbbb225u); const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); const int scalarSize = glu::getDataTypeScalarSize(type); const Vec2 range (-1e8f, +1e8f); // \note Filled as floats. fillRandomScalars(rnd, range.x(), range.y(), values[0], numValues*scalarSize); } bool compare (const void* const* inputs, const void* const* outputs) { const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); const int scalarSize = glu::getDataTypeScalarSize(type); const deUint32 maxUlpDiff = 0; for (int compNdx = 0; compNdx < scalarSize; compNdx++) { const float in0 = ((const float*)inputs[0])[compNdx]; const float out0 = ((const float*)outputs[0])[compNdx]; const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(in0, out0); if (ulpDiff > maxUlpDiff) { m_failMsg << "Expected [" << compNdx << "] = " << tcu::toHex(tcu::Float32(in0).bits()) << " with ULP threshold " << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff); return false; } } return true; } }; class BitsToFloatCase : public CommonFunctionCase { public: BitsToFloatCase (tcu::TestContext& testCtx, glu::DataType baseType) : CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, glu::PRECISION_HIGHP).c_str(), glu::isDataTypeIntOrIVec(baseType) ? "intBitsToFloat" : "uintBitsToFloat") { const bool inIsSigned = glu::isDataTypeIntOrIVec(baseType); const int vecSize = glu::getDataTypeScalarSize(baseType); const glu::DataType floatType = vecSize > 1 ? glu::getDataTypeFloatVec(vecSize) : glu::TYPE_FLOAT; m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, glu::PRECISION_HIGHP))); m_spec.outputs.push_back(Symbol("out0", glu::VarType(floatType, glu::PRECISION_HIGHP))); m_spec.source = inIsSigned ? 
"out0 = intBitsToFloat(in0);" : "out0 = uintBitsToFloat(in0);"; } TestInstance* createInstance (Context& ctx) const { return new BitsToFloatCaseInstance(ctx, m_spec, m_numValues, getName()); } }; } // anonymous ShaderCommonFunctionTests::ShaderCommonFunctionTests (tcu::TestContext& testCtx) : tcu::TestCaseGroup (testCtx, "common", "Common function tests") { } ShaderCommonFunctionTests::~ShaderCommonFunctionTests (void) { } void ShaderCommonFunctionTests::init (void) { static const std::vector kIntOnly (1u, glu::TYPE_INT); static const std::vector kFloatOnly (1u, glu::TYPE_FLOAT); static const std::vector kFloatAndDouble {glu::TYPE_FLOAT, glu::TYPE_DOUBLE}; addFunctionCases (this, "abs", kIntOnly); addFunctionCases (this, "sign", kIntOnly); addFunctionCases (this, "isnan", kFloatAndDouble); addFunctionCases (this, "isinf", kFloatAndDouble); addFunctionCases (this, "floatbitstoint", kFloatOnly); addFunctionCases (this, "floatbitstouint", kFloatOnly); // (u)intBitsToFloat() { tcu::TestCaseGroup* intGroup = new tcu::TestCaseGroup(m_testCtx, "intbitstofloat", "intBitsToFloat() Tests"); tcu::TestCaseGroup* uintGroup = new tcu::TestCaseGroup(m_testCtx, "uintbitstofloat", "uintBitsToFloat() Tests"); addChild(intGroup); addChild(uintGroup); for (int vecSize = 1; vecSize < 4; vecSize++) { const glu::DataType intType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT; const glu::DataType uintType = vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT; intGroup->addChild(new BitsToFloatCase(getTestContext(), intType)); uintGroup->addChild(new BitsToFloatCase(getTestContext(), uintType)); } } } } // shaderexecutor } // vkt