/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/effects/GrTextureEffect.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout.
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
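// For illustration (hypothetical numbers): with a 64x64 granularity on a 200x200 attachment,
// srcBounds of {left=10, top=20, right=100, bottom=50} snaps outward to
// {left=0, top=0, right=128, bottom=64}. If rounding a right/bottom edge up would exceed the
// attachment size, that axis instead falls back to the full [0, max] range.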
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
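    // A granularity of 0 or 1 in a dimension means any rect is already aligned in that
    // dimension, so when both dimensions qualify we can skip the adjustment entirely.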
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass, and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer since
        // its lifetime matches how long we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check here that the GrVkGpu has a valid current command buffer instead of each time we
    // access it. If the command buffer is valid here, it should be valid throughout the use of
    // the render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
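        // Set only the most significant stencil bit; Skia reserves the uppermost bit of the
        // stencil buffer for clip state.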
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general
    // it means we missed an opportunity higher up the stack to set the load op to be a clear.
    // However, there are situations where higher up we couldn't discard the previous ops and set
    // a clear load op (e.g. if we needed to execute a wait op). Thus we also have the empty check
    // here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then re-enable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures'
    // layouts back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment.
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get())) {
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (auto* gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer.get())) {
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get())) {
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
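    // Vulkan may cap the per-call draw count (e.g. drawCount must be 1 when the
    // multiDrawIndirect feature is unavailable), so issue the draws in batches of at most
    // maxDrawCount.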
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
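    // As in onDrawIndirect, split the request into batches of at most maxDrawCount draws.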
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass.
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = fFramebuffer->colorAttachment()->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onDrawBlurImage(const GrSurfaceProxyView& proxyView,
                                        const SkBlurArg& blurArg) {
    if (!proxyView.proxy()) {
        return;
    }

    GrVkTexture* texture = static_cast<GrVkTexture*>(proxyView.proxy()->peekTexture());
    if (!texture) {
        return;
    }
    GrVkImage* image = texture->textureImage();
    if (!image) {
        return;
    }

    HITRACE_OHOS_NAME_ALWAYS("DrawBlurImage");
    // As with texture ops, the referenced resources' refcounts must be incremented.
    fGpu->currentCommandBuffer()->addResource(image->textureView());
    fGpu->currentCommandBuffer()->addResource(image->resource());
    // OH ISSUE: fix hps blur, add GrSurface reference protection.
    fGpu->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(texture));
    SkOriginInfo originInfo {};
    originInfo.imageOrigin = proxyView.origin();
    originInfo.rtOrigin = fOrigin;
    fGpu->currentCommandBuffer()->drawBlurImage(fGpu, image,
                                                fFramebuffer->colorAttachment()->dimensions(),
                                                originInfo, blurArg);
}