/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/d3d/GrD3DPipelineState.h"

#include "include/private/SkTemplates.h"
#include "src/gpu/GrFragmentProcessor.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrXferProcessor.h"
#include "src/gpu/d3d/GrD3DBuffer.h"
#include "src/gpu/d3d/GrD3DGpu.h"
#include "src/gpu/d3d/GrD3DPipeline.h"
#include "src/gpu/d3d/GrD3DRootSignature.h"
#include "src/gpu/d3d/GrD3DTexture.h"
#include "src/gpu/effects/GrTextureEffect.h"

GrD3DPipelineState::GrD3DPipelineState(
        sk_sp<GrD3DPipeline> pipeline,
        sk_sp<GrD3DRootSignature> rootSignature,
        const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
        const UniformInfoArray& uniforms,
        uint32_t uniformSize,
        uint32_t numSamplers,
        std::unique_ptr<GrGeometryProcessor::ProgramImpl> gpImpl,
        std::unique_ptr<GrXferProcessor::ProgramImpl> xpImpl,
        std::vector<std::unique_ptr<GrFragmentProcessor::ProgramImpl>> fpImpls,
        size_t vertexStride,
        size_t instanceStride)
        : fPipeline(std::move(pipeline))
        , fRootSignature(std::move(rootSignature))
        , fBuiltinUniformHandles(builtinUniformHandles)
        , fGPImpl(std::move(gpImpl))
        , fXPImpl(std::move(xpImpl))
        , fFPImpls(std::move(fpImpls))
        , fDataManager(uniforms, uniformSize)
        , fNumSamplers(numSamplers)
        , fVertexStride(vertexStride)
        , fInstanceStride(instanceStride) {}

void GrD3DPipelineState::setAndBindConstants(GrD3DGpu* gpu,
                                             const GrRenderTarget* renderTarget,
                                             const GrProgramInfo& programInfo) {
    this->setRenderTargetState(renderTarget, programInfo.origin());

    fGPImpl->setData(fDataManager, *gpu->caps()->shaderCaps(), programInfo.geomProc());

    for (int i = 0; i < programInfo.pipeline().numFragmentProcessors(); ++i) {
        const auto& fp = programInfo.pipeline().getFragmentProcessor(i);
        fp.visitWithImpls([&](const GrFragmentProcessor& fp,
                              GrFragmentProcessor::ProgramImpl& impl) {
            impl.setData(fDataManager, fp);
        }, *fFPImpls[i]);
    }

    programInfo.pipeline().setDstTextureUniforms(fDataManager, &fBuiltinUniformHandles);
    fXPImpl->setData(fDataManager, programInfo.pipeline().getXferProcessor());

    D3D12_GPU_VIRTUAL_ADDRESS constantsAddress = fDataManager.uploadConstants(gpu);
    gpu->currentCommandList()->setGraphicsRootConstantBufferView(
            (unsigned int)(GrD3DRootSignature::ParamIndex::kConstantBufferView),
            constantsAddress);
}

void GrD3DPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {
    // Set RT adjustment and RT flip
    SkISize dimensions = rt->dimensions();
    SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
    if (fRenderTargetState.fRenderTargetOrigin != origin ||
        fRenderTargetState.fRenderTargetSize != dimensions) {
        fRenderTargetState.fRenderTargetSize = dimensions;
        fRenderTargetState.fRenderTargetOrigin = origin;

        // The client will mark a swap buffer as kTopLeft when making a SkSurface because
        // D3D's framebuffer space has (0, 0) at the top left. This agrees with Skia's device
        // coords. However, in NDC (-1, -1) is the bottom left. So we flip when origin is kTopLeft.
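        // Broadly speaking: the adjustment vector below is consumed in the vertex shader to map
        // Skia device coordinates into NDC, while the flip vector lets fragment-stage builtins
        // (e.g. sk_FragCoord) account for the top-left framebuffer origin. See GetRTAdjustVector
        // and GetRTFlipVector for the exact values packed into these uniforms.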
        bool flip = (origin == kTopLeft_GrSurfaceOrigin);
        std::array<float, 4> v = SkSL::Compiler::GetRTAdjustVector(dimensions, flip);
        fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, v.data());
        if (fBuiltinUniformHandles.fRTFlipUni.isValid()) {
            // Note above that framebuffer space has origin top left. So we need !flip here.
            std::array<float, 2> d = SkSL::Compiler::GetRTFlipVector(rt->height(), !flip);
            fDataManager.set2fv(fBuiltinUniformHandles.fRTFlipUni, 1, d.data());
        }
    }
}

void GrD3DPipelineState::setAndBindTextures(GrD3DGpu* gpu,
                                            const GrGeometryProcessor& geomProc,
                                            const GrSurfaceProxy* const geomProcTextures[],
                                            const GrPipeline& pipeline) {
    SkASSERT(geomProcTextures || !geomProc.numTextureSamplers());

    std::vector<D3D12_CPU_DESCRIPTOR_HANDLE> shaderResourceViews(fNumSamplers);
    std::vector<D3D12_CPU_DESCRIPTOR_HANDLE> samplers(fNumSamplers);
    unsigned int currTextureBinding = 0;

    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        SkASSERT(geomProcTextures[i]->asTextureProxy());
        const auto& sampler = geomProc.textureSampler(i);
        auto texture = static_cast<GrD3DTexture*>(geomProcTextures[i]->peekTexture());
        shaderResourceViews[currTextureBinding] = texture->shaderResourceView();
        samplers[currTextureBinding++] =
                gpu->resourceProvider().findOrCreateCompatibleSampler(sampler.samplerState());
        gpu->currentCommandList()->addSampledTextureRef(texture);
    }

    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        auto texture = static_cast<GrD3DTexture*>(dstTexture);
        shaderResourceViews[currTextureBinding] = texture->shaderResourceView();
        samplers[currTextureBinding++] = gpu->resourceProvider().findOrCreateCompatibleSampler(
                GrSamplerState::Filter::kNearest);
        gpu->currentCommandList()->addSampledTextureRef(texture);
    }

    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        GrSamplerState samplerState = te.samplerState();
        auto* texture = static_cast<GrD3DTexture*>(te.texture());
        shaderResourceViews[currTextureBinding] = texture->shaderResourceView();
        samplers[currTextureBinding++] =
                gpu->resourceProvider().findOrCreateCompatibleSampler(samplerState);
        gpu->currentCommandList()->addSampledTextureRef(texture);
    });

    SkASSERT(fNumSamplers == currTextureBinding);

    // fill in descriptor tables and bind to root signature
    if (fNumSamplers > 0) {
        // set up descriptor tables and bind heaps
        sk_sp<GrD3DDescriptorTable> srvTable =
                gpu->resourceProvider().findOrCreateShaderViewTable(shaderResourceViews);
        sk_sp<GrD3DDescriptorTable> samplerTable =
                gpu->resourceProvider().findOrCreateSamplerTable(samplers);
        gpu->currentCommandList()->setDescriptorHeaps(srvTable->heap(), samplerTable->heap());

        // bind shader resource view table
        gpu->currentCommandList()->setGraphicsRootDescriptorTable(
                (unsigned int)GrD3DRootSignature::ParamIndex::kShaderViewDescriptorTable,
                srvTable->baseGpuDescriptor());

        // bind sampler table
        gpu->currentCommandList()->setGraphicsRootDescriptorTable(
                (unsigned int)GrD3DRootSignature::ParamIndex::kSamplerDescriptorTable,
                samplerTable->baseGpuDescriptor());
    }
}

void GrD3DPipelineState::bindBuffers(GrD3DGpu* gpu, sk_sp<const GrBuffer> indexBuffer,
                                     sk_sp<const GrBuffer> instanceBuffer,
                                     sk_sp<const GrBuffer> vertexBuffer,
                                     GrD3DDirectCommandList* commandList) {
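    // Transition each GPU-backed buffer into the resource state D3D12 expects for its usage
    // (vertex/instance data vs. index data) before binding it on the command list below.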
    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in the PipelineState. That is, vertex first (if any) followed by instance.
    if (vertexBuffer) {
        auto* d3dVertexBuffer = static_cast<const GrD3DBuffer*>(vertexBuffer.get());
        SkASSERT(!d3dVertexBuffer->isCpuBuffer());
        SkASSERT(!d3dVertexBuffer->isMapped());
        const_cast<GrD3DBuffer*>(d3dVertexBuffer)->setResourceState(
                gpu, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
    }
    if (instanceBuffer) {
        auto* d3dInstanceBuffer = static_cast<const GrD3DBuffer*>(instanceBuffer.get());
        SkASSERT(!d3dInstanceBuffer->isCpuBuffer());
        SkASSERT(!d3dInstanceBuffer->isMapped());
        const_cast<GrD3DBuffer*>(d3dInstanceBuffer)->setResourceState(
                gpu, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
    }
    commandList->setVertexBuffers(0, std::move(vertexBuffer), fVertexStride,
                                  std::move(instanceBuffer), fInstanceStride);

    if (auto* d3dIndexBuffer = static_cast<const GrD3DBuffer*>(indexBuffer.get())) {
        SkASSERT(!d3dIndexBuffer->isCpuBuffer());
        SkASSERT(!d3dIndexBuffer->isMapped());
        const_cast<GrD3DBuffer*>(d3dIndexBuffer)->setResourceState(
                gpu, D3D12_RESOURCE_STATE_INDEX_BUFFER);
        commandList->setIndexBuffer(std::move(indexBuffer));
    }
}