Lines Matching defs:layout
23 * \brief SSBO layout case.
466 //! Appends all child elements to layout, returns the value that should be added to the offset.
468 BufferLayout& layout,
475 // Reference layout uses std430 rules by default. std140 rules are
476 // chosen only for blocks that have std140 layout.
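The practical difference is the extra rounding std140 applies to array and structure base alignments. A minimal sketch of that rule, assuming nothing beyond the deAlign32 helper already used elsewhere in this file (this is not the CTS implementation itself):

    // std430 keeps an array element's natural base alignment; std140
    // rounds it up to the base alignment of a vec4 (16 bytes).
    int arrayElementBaseAlignment (int naturalAlignment, bool isStd140)
    {
        return isStd140 ? deAlign32(naturalAlignment, 16) : naturalAlignment;
    }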
528 layout.bufferVars.push_back(entry);
553 layout.bufferVars.push_back(entry);
581 layout.bufferVars.push_back(entry);
588 curOffset += computeReferenceLayout(layout, curBlockNdx, curOffset, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
596 curOffset += computeReferenceLayout(layout, curBlockNdx, curOffset, curPrefix + "." + memberIter->getName(), memberIter->getType(), layoutFlags);
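The two recursive calls above show the traversal shape: arrays extend the name prefix with an element index, structs with a member name. A simplified sketch of that recursion, assuming the glu::VarType accessors and deliberately ignoring alignment, matrix order, and layout flags:

    // Simplified recursion sketch; basic types would append a terminal
    // BufferVarLayoutEntry and return their size instead of recursing.
    int computeReferenceLayoutSketch (BufferLayout& layout, int blockNdx, int offset,
                                      const std::string& prefix, const glu::VarType& type)
    {
        int curOffset = offset;
        if (type.isArrayType())
        {
            for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
                curOffset += computeReferenceLayoutSketch(layout, blockNdx, curOffset,
                                                          prefix + "[" + de::toString(elemNdx) + "]",
                                                          type.getElementType());
        }
        else if (type.isStructType())
        {
            for (glu::StructType::ConstIterator memberIter = type.getStructPtr()->begin();
                 memberIter != type.getStructPtr()->end();
                 memberIter++)
                curOffset += computeReferenceLayoutSketch(layout, blockNdx, curOffset,
                                                          prefix + "." + memberIter->getName(),
                                                          memberIter->getType());
        }
        return curOffset - offset;
    }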
605 //! Appends all child elements to layout, returns offset increment.
606 int computeReferenceLayout (BufferLayout& layout, int curBlockNdx, const std::string& blockPrefix, int baseOffset, const BufferVar& bufVar, deUint32 blockLayoutFlags)
646 layout.bufferVars.push_back(entry);
678 layout.bufferVars.push_back(entry);
691 // the layout computation twice. Instead we fix up the stride of child elements afterwards.
693 const int firstChildNdx = (int)layout.bufferVars.size();
695 const int size = computeReferenceLayout(layout, curBlockNdx, deAlign32(curOffset, baseAlignment), prefix, varType.getElementType(), combinedFlags);
698 for (int childNdx = firstChildNdx; childNdx < (int)layout.bufferVars.size(); childNdx++)
700 layout.bufferVars[childNdx].topLevelArraySize = topLevelArraySize;
701 layout.bufferVars[childNdx].topLevelArrayStride = stride;
711 return computeReferenceLayout(layout, curBlockNdx, baseOffset, blockPrefix + bufVar.getName(), varType, combinedFlags);
714 void computeReferenceLayout (BufferLayout& layout, ShaderInterface& interface)
724 int activeBlockNdx = (int)layout.blocks.size();
725 int firstVarNdx = (int)layout.bufferVars.size();
727 size_t oldSize = layout.bufferVars.size();
731 curOffset += computeReferenceLayout(layout, activeBlockNdx, blockPrefix, curOffset, bufVar, block.getFlags());
734 DE_ASSERT(layout.bufferVars.size() > oldSize);
735 bufVar.setOffset(layout.bufferVars[oldSize].offset);
737 oldSize = layout.bufferVars.size();
740 int varIndicesEnd = (int)layout.bufferVars.size();
744 // Create block layout entries for each instance.
748 layout.blocks.push_back(BlockLayoutEntry());
749 BlockLayoutEntry& blockEntry = layout.blocks.back();
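Each instance of an instanced block gets its own BlockLayoutEntry, all sharing the variable index range computed above. A sketch of the per-instance loop, where numInstances, isArray, and blockName are hypothetical stand-ins for the surrounding code's values:

    // Sketch: two instances of block "Block" yield entries "Block[0]" and
    // "Block[1]", each listing the same active variables
    // [firstVarNdx, varIndicesEnd).
    for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
    {
        layout.blocks.push_back(BlockLayoutEntry());
        BlockLayoutEntry& blockEntry = layout.blocks.back();
        blockEntry.name = isArray ? blockName + "[" + de::toString(instanceNdx) + "]"
                                  : blockName;
        for (int varNdx = firstVarNdx; varNdx < varIndicesEnd; varNdx++)
            blockEntry.activeVarIndices.push_back(varNdx);
    }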
821 void generateValues (const BufferLayout& layout, const vector<BlockDataPtr>& blockPointers, deUint32 seed)
824 const int numBlocks = (int)layout.blocks.size();
830 const BlockLayoutEntry& blockLayout = layout.blocks[blockNdx];
832 const int numEntries = (int)layout.blocks[blockNdx].activeVarIndices.size();
837 const BufferVarLayoutEntry& varEntry = layout.bufferVars[varNdx];
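generateValues appears to walk each block's active variables and fill their storage with seeded pseudo-random data; a sketch of that outer structure, with fillEntry as a hypothetical per-entry helper:

    // Sketch: deterministic seeding keeps the reference data reproducible,
    // so the shader-side comparison has stable expected values.
    de::Random rnd (seed);
    for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
    {
        const BlockLayoutEntry& blockLayout = layout.blocks[blockNdx];
        void* const             basePtr     = blockPointers[blockNdx].ptr;
        for (int entryNdx = 0; entryNdx < (int)blockLayout.activeVarIndices.size(); entryNdx++)
        {
            const BufferVarLayoutEntry& varEntry = layout.bufferVars[blockLayout.activeVarIndices[entryNdx]];
            fillEntry(varEntry, basePtr, rnd); // hypothetical per-entry fill
        }
    }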
951 src << "layout(" << LayoutFlagsFmt(bufferVar.getFlags() & LAYOUT_MASK) << ") ";
953 src << "layout(offset = " << bufferVar.getOffset() << ") ";
960 src << "layout(";
1280 void generateCompareSrc (std::ostream& src, const char* resultVar, const ShaderInterface& interface, const BufferLayout& layout, const vector<BlockDataPtr>& blockPointers, MatrixLoadFlags matrixLoadFlag)
1293 const int blockNdx = layout.getBlockIndex(instanceName);
1303 generateCompareSrc(src, resultVar, layout, block, instanceNdx, blockPtr, bufVar, glu::SubTypeAccess(bufVar.getType()), matrixLoadFlag);
1391 void generateWriteSrc (std::ostream& src, const ShaderInterface& interface, const BufferLayout& layout, const vector<BlockDataPtr>& blockPointers, MatrixStoreFlags matrixStoreFlag)
1404 const int blockNdx = layout.getBlockIndex(instanceName);
1414 generateWriteSrc(src, layout, block, instanceNdx, blockPtr, bufVar, glu::SubTypeAccess(bufVar.getType()), matrixStoreFlag);
1420 string generateComputeShader (const ShaderInterface& interface, const BufferLayout& layout, const vector<BlockDataPtr>& comparePtrs, const vector<BlockDataPtr>& writePtrs, MatrixLoadFlags matrixLoadFlag, MatrixStoreFlags matrixStoreFlag, bool usePhysStorageBuffer)
1439 src << "layout(local_size_x = 1) in;\n";
1443 src << "layout(std140, binding = 0) buffer AcBlock { highp uint ac_numPassed; };\n\n";
1459 src << "layout (push_constant, std430) uniform PC {\n";
1485 generateCompareSrc(src, "allOk", interface, layout, comparePtrs, matrixLoadFlag);
1492 generateWriteSrc(src, interface, layout, writePtrs, matrixStoreFlag);
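Put together, the fragments above produce a compute shader whose prologue looks roughly like the following (illustrative only: the version directive, block declarations, and the compare/write bodies are elided, and the push-constant block is only emitted for the physical-storage-buffer variant):

    const std::string examplePrologue =
        "layout(local_size_x = 1) in;\n"
        "layout(std140, binding = 0) buffer AcBlock { highp uint ac_numPassed; };\n"
        "\n"
        "layout (push_constant, std430) uniform PC {\n"
        "    // buffer references, elided\n"
        "};\n";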
1566 // \note Src layout is used as reference in case activeVarIndices happens to be incorrect in dstLayout blocks.
1594 const BufferLayout& layout,
1609 copyNonWrittenData(layout, block, instanceNdx, srcBlockPtr, dstBlockPtr, bufVar, accessPath.element(elemNdx));
1616 copyNonWrittenData(layout, block, instanceNdx, srcBlockPtr, dstBlockPtr, bufVar, accessPath.member(memberNdx));
1623 const int varNdx = layout.getVariableIndex(apiName);
1627 const BufferVarLayoutEntry& varLayout = layout.bufferVars[varNdx];
1633 void copyNonWrittenData (const ShaderInterface& interface, const BufferLayout& layout, const vector<BlockDataPtr>& srcPtrs, const vector<BlockDataPtr>& dstPtrs)
1646 const int blockNdx = layout.getBlockIndex(instanceName);
1657 copyNonWrittenData(layout, block, instanceNdx, srcBlockPtr, dstBlockPtr, bufVar, glu::SubTypeAccess(bufVar.getType()));
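copyNonWrittenData mirrors the compare/write traversal: it descends through array elements and struct members and, at each leaf the shader does not write, copies the reference bytes into the result buffer so the later comparison only flags genuinely wrong writes. A minimal sketch of the leaf copy, with sizeInBytes as a hypothetical size computed from the variable's type and strides:

    // Sketch of the leaf copy: both buffers use the same layout, so the
    // variable sits at the same offset in source and destination.
    void copyVarBytes (const BufferVarLayoutEntry& varLayout,
                       void* dstBasePtr, const void* srcBasePtr, int sizeInBytes)
    {
        deMemcpy((deUint8*)dstBasePtr + varLayout.offset,
                 (const deUint8*)srcBasePtr + varLayout.offset,
                 sizeInBytes);
    }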
1858 static bool hasUnsizedArray (const BufferLayout& layout, const BlockLayoutEntry& entry)
1862 if (isUnsizedArray(layout.bufferVars[*varNdx]))
1869 static int getUnsizedArrayStride (const BufferLayout& layout, const BlockLayoutEntry& entry)
1873 const BufferVarLayoutEntry& varEntry = layout.bufferVars[*varNdx];
1884 vector<int> computeBufferSizes (const ShaderInterface& interface, const BufferLayout& layout)
1886 vector<int> sizes(layout.blocks.size());
1897 const int blockNdx = layout.getBlockIndex(apiName);
1901 const BlockLayoutEntry& blockLayout = layout.blocks[blockNdx];
1903 const bool isLastUnsized = hasUnsizedArray(layout, blockLayout);
1905 const int stride = isLastUnsized ? getUnsizedArrayStride(layout, blockLayout) : 0;
1915 BlockDataPtr getBlockDataPtr (const BufferLayout& layout, const BlockLayoutEntry& blockLayout, void* ptr, int bufferSize)
1917 const bool isLastUnsized = hasUnsizedArray(layout, blockLayout);
1922 const int lastArrayStride = getUnsizedArrayStride(layout, blockLayout);
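For a block ending in an unsized array, the declared block size only covers the sized portion; the actual buffer must add one stride per array element. A sketch of the presumable size rule, with arrayLength as a hypothetical per-test element count:

    // Sketch: bufferSize = sized part + trailing unsized array.
    // getBlockDataPtr presumably inverts this to recover the element count:
    // arrayLength = (bufferSize - blockLayout.size) / unsizedArrayStride.
    int computeBlockBufferSize (const BlockLayoutEntry& blockLayout,
                                int unsizedArrayStride, int arrayLength)
    {
        return blockLayout.size + unsizedArrayStride * arrayLength;
    }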
1952 void initRefDataStorage (const ShaderInterface& interface, const BufferLayout& layout, RefDataStorage& storage)
1956 const vector<int> bufferSizes = computeBufferSizes(interface, layout);
1973 DE_ASSERT(bufferSizes.size() == layout.blocks.size());
1976 storage.pointers.resize(layout.blocks.size());
1978 for (int blockNdx = 0; blockNdx < (int)layout.blocks.size(); blockNdx++)
1980 const BlockLayoutEntry& blockLayout = layout.blocks[blockNdx];
1983 storage.pointers[blockNdx] = getBlockDataPtr(layout, blockLayout, basePtr + curOffset, bufferSize);
1992 vector<BlockDataPtr> blockLocationsToPtrs (const BufferLayout& layout, const vector<BlockLocation>& blockLocations, const vector<void*>& bufPtrs)
1996 DE_ASSERT(layout.blocks.size() == blockLocations.size());
1998 for (int blockNdx = 0; blockNdx < (int)layout.blocks.size(); blockNdx++)
2000 const BlockLayoutEntry& blockLayout = layout.blocks[blockNdx];
2003 blockPtrs[blockNdx] = getBlockDataPtr(layout, blockLayout, (deUint8*)bufPtrs[location.index] + location.offset, location.size);
2334 *pipelineLayout, // VkPipelineLayout layout;
2464 // Valid scalar layouts are a superset of valid relaxed layouts. So check scalar layout first.
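The comment's ordering argument: any offset valid under relaxed rules is also valid under scalar rules, so a failed scalar check already rules out relaxed and the stricter check can be skipped. A sketch with hypothetical predicates (not the CTS helpers):

    bool isOffsetValid (int offset, bool scalarLayoutEnabled)
    {
        if (!satisfiesScalarRules(offset))
            return false;                   // relaxed rules would fail too
        // Scalar-valid; accept directly if the scalar feature is enabled,
        // otherwise still require the stricter relaxed rules.
        return scalarLayoutEnabled || satisfiesRelaxedRules(offset);
    }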