/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/SkTo.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrThreadSafePipelineBuilder.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkOpsRenderPass.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"

#include "include/gpu/vk/GrVkExtensions.h"
#include "include/gpu/vk/GrVkTypes.h"

#include <utility>

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)

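// Size in bytes of the header at the front of a standalone .astc file: a 4-byte magic number,
// 3 bytes of block dimensions, and 9 bytes of image extents.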
constexpr uint8_t ASTC_HEADER_SIZE = 16;

sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
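    // vkEnumerateInstanceVersion was added in Vulkan 1.1, so if the loader cannot resolve it we
    // must be running against a Vulkan 1.0 instance.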
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

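    // Clamp both the instance and device versions to the client-requested maximum so we never
    // rely on API features beyond what the client has agreed to use.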
    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    sk_sp<const GrVkInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know whether it is enabled so we know if we can transition to a present
        // layout when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know whether it is enabled so we know if we can transition to a present
        // layout when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
                                                       backendContext.fPhysicalDevice,
                                                       backendContext.fDevice, physDevVersion,
                                                       backendContext.fVkExtensions, interface,
                                                       caps.get());
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }
    const size_t maxBlockCount = SkGetVmaBlockCountMax(); // limit memory holes in the VMA cache
    sk_sp<GrVkMemoryAllocator> memoryAllocatorCacheImage =
        GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
                                     backendContext.fPhysicalDevice,
                                     backendContext.fDevice, physDevVersion,
                                     backendContext.fVkExtensions, interface,
                                     caps.get(), true, maxBlockCount);
    if (!memoryAllocatorCacheImage) {
        SkDEBUGFAIL("No supplied vulkan memory allocator for cache image and unable to create "
                    "one internally.");
        return nullptr;
    }

    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
                                     instanceVersion, physDevVersion,
                                     std::move(memoryAllocator),
                                     std::move(memoryAllocatorCacheImage)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}
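
// Clients do not call GrVkGpu::Make() directly; it is reached via GrDirectContext. A minimal,
// illustrative sketch (the instance/device setup shown is hypothetical client code):
//
//     GrVkBackendContext backendContext;
//     backendContext.fInstance           = instance;       // created by the client
//     backendContext.fPhysicalDevice     = physicalDevice;
//     backendContext.fDevice             = device;
//     backendContext.fQueue              = graphicsQueue;
//     backendContext.fGraphicsQueueIndex = queueIndex;
//     backendContext.fGetProc            = getProc;        // resolves Vulkan entry points
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(backendContext);
//
// GrDirectContext::MakeVulkan() forwards to GrVkGpu::Make() above.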

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
                 uint32_t instanceVersion, uint32_t physicalDeviceVersion,
                 sk_sp<GrVkMemoryAllocator> memoryAllocator,
                 sk_sp<GrVkMemoryAllocator> memoryAllocatorCacheImage)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fMemoryAllocatorCacheImage(std::move(memoryAllocatorCacheImage))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);
    SkASSERT(fMemoryAllocatorCacheImage);

    this->initCapsAndCompiler(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
    fMemoryAllocatorCacheImage.reset();
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.reset();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.reset();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                           GrAccessPattern accessPattern, const void* data) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);

    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
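    // bufferRowLength is expressed in texels, not bytes, so convert the caller's rowBytes. A
    // bufferImageHeight of 0 tells Vulkan the rows are tightly packed to the copy height.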
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const GrVkAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

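    // Copy row by row: the destination pitch is the driver-reported rowPitch of the linear
    // image, while the source pitch is the caller's rowBytes; only trimRowBytes per row carry
    // actual pixel data.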
    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
    GrVkMemory::UnmapAlloc(this, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         SkTArray<VkBufferImageCopy>* regions,
                                         SkTArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkImage::CompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         GrMipmapped mipmapped) {
    SkASSERT(compression != SkImage::CompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_back(numMipLevels);
    individualMipOffsets->reserve_back(numMipLevels);

    size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(compression,
                                             dimensions,
                                             individualMipOffsets,
                                             mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets->count() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bytesPerBlock;
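    // Round the alignment up to the least common multiple of bytesPerBlock and 4. For example,
    // a 2-byte block becomes 4, an 8-byte block is already fine, and an odd size such as 3
    // becomes 12.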
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

static size_t fill_in_compressed_regions(SkTArray<VkBufferImageCopy>* regions,
                                         SkTArray<size_t>* individualMipOffsets,
                                         SkImage::CompressionType compression,
                                         SkISize dimensions,
                                         GrMipmapped mipmapped) {
    SkASSERT(regions);
    SkASSERT(individualMipOffsets);
    SkASSERT(compression != SkImage::CompressionType::kNone);

    int mipmapLevelCount = 1;
    if (mipmapped == GrMipmapped::kYes) {
        mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
    regions->reserve_back(mipmapLevelCount);
    individualMipOffsets->reserve_back(mipmapLevelCount);

    size_t bufferSize = SkCompressedDataSize(compression,
                                             dimensions,
                                             individualMipOffsets,
                                             mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets->count() == mipmapLevelCount);

    for (int i = 0; i < mipmapLevelCount; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        region.bufferOffset = (*individualMipOffsets)[i];
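        // A standalone .astc blob begins with a 16-byte file header (see ASTC_HEADER_SIZE
        // above); skip past it so the copy starts at the compressed payload.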
        if (compression == SkImage::CompressionType::kASTC_RGBA8_4x4 ||
            compression == SkImage::CompressionType::kASTC_RGBA8_6x6 ||
            compression == SkImage::CompressionType::kASTC_RGBA8_8x8) {
            region.bufferOffset += ASTC_HEADER_SIZE;
        }
        SkISize compressedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = compressedDimensions.width();
        region.bufferImageHeight = compressedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent.width = SkToU32(dimensions.width());
        region.imageExtent.height = SkToU32(dimensions.height());
        region.imageExtent.depth = 1;

        dimensions = {std::max(1, dimensions.width() / 2),
                      std::max(1, dimensions.height() / 2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    SkTArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to multiple of the texel size and 4
    size_t alignment = bpp;
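    // As in fill_in_compressed_regions() above: bump the alignment to the least common multiple
    // of bpp and 4 (e.g. 2 -> 4, 3 -> 12, 4 stays 4).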
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth  = std::max(1,  currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The buffer comes from the
    // staging manager, which will make sure the command buffer has a ref on it. This avoids
    // having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkImage::CompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipMapped,
                                      const void* data, size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipMapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The buffer comes from the
    // staging manager, which will make sure the command buffer has a ref on it. This avoids
    // having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());

    return true;
}

bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkImage::CompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipMapped,
                                      OH_NativeBuffer* nativeBuffer, size_t bufferSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(uploadTexture);
    SkASSERT(nativeBuffer);
    SkASSERT(!uploadTexture->isLinearTiled());

    if (dimensions.width() == 0 || dimensions.height() == 0) {
        return false;
    }
    SkASSERT(dimensions.width() == uploadTexture->width() &&
             dimensions.height() == uploadTexture->height());

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =)
            fill_in_compressed_regions(&regions, &individualMipOffsets,
                                       compression, dimensions, mipMapped);
    SkASSERT(bufferSize == combinedBufferSize);

    // Import the external memory backing the OH_NativeBuffer as a Vulkan buffer.
    sk_sp<GrVkBuffer> vkBuffer = GrVkBuffer::MakeFromOHNativeBuffer(this, nativeBuffer, bufferSize,
                                                                    GrGpuBufferType::kXferCpuToGpu,
                                                                    kDynamic_GrAccessPattern);
    // MakeFromOHNativeBuffer can fail; bail out rather than dereferencing a null buffer below.
    if (!vkBuffer) {
        return false;
    }

    // Change image layout so it can be copied to.
    uploadTexture->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this, vkBuffer->vkBuffer(), uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(), regions.begin());
    this->takeOwnershipOfBuffer(std::move(vkBuffer));

    return true;
}
1143 
1144 ////////////////////////////////////////////////////////////////////////////////
1145 // TODO: make this take a GrMipmapped
onCreateTexture(SkISize dimensions, const GrBackendFormat& format, GrRenderable renderable, int renderTargetSampleCnt, SkBudgeted budgeted, GrProtected isProtected, int mipLevelCount, uint32_t levelClearMask)1146 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1147                                           const GrBackendFormat& format,
1148                                           GrRenderable renderable,
1149                                           int renderTargetSampleCnt,
1150                                           SkBudgeted budgeted,
1151                                           GrProtected isProtected,
1152                                           int mipLevelCount,
1153                                           uint32_t levelClearMask) {
1154     VkFormat pixelFormat;
1155     SkAssertResult(format.asVkFormat(&pixelFormat));
1156     SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1157     SkASSERT(mipLevelCount > 0);
1158 
1159     HITRACE_OHOS_NAME_FMT_ALWAYS("onCreateTexture width = %d, height = %d",
1160         dimensions.width(), dimensions.height());
1161     GrMipmapStatus mipmapStatus =
1162             mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1163 
1164     sk_sp<GrVkTexture> tex;
1165     if (renderable == GrRenderable::kYes) {
1166         tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1167                 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1168                 mipmapStatus, isProtected);
1169     } else {
1170         tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1171                                           mipLevelCount, isProtected, mipmapStatus);
1172     }
1173 
1174     if (!tex) {
1175         return nullptr;
1176     }
1177 
1178     if (levelClearMask) {
1179         if (!this->currentCommandBuffer()) {
1180             return nullptr;
1181         }
1182         SkSTArray<1, VkImageSubresourceRange> ranges;
1183         bool inRange = false;
1184         GrVkImage* texImage = tex->textureImage();
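        // Walk the mip chain and coalesce consecutive set bits in levelClearMask into
        // contiguous VkImageSubresourceRanges, so the clear below is issued with as few
        // ranges as possible.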
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
        texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 false);
        this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
                                                      ranges.count(), ranges.begin());
    }
    return std::move(tex);
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    SkBudgeted budgeted,
                                                    GrMipmapped mipMapped,
                                                    GrProtected isProtected,
                                                    const void* data, size_t dataSize) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(GrVkFormatIsCompressed(pixelFormat));

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
                                                                   : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                           numMipLevels, isProtected, mipmapStatus);
    if (!tex) {
        return nullptr;
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipMapped, data, dataSize)) {
        return nullptr;
    }

    return std::move(tex);
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    SkBudgeted budgeted,
                                                    GrMipmapped mipMapped,
                                                    GrProtected isProtected,
                                                    OH_NativeBuffer* nativeBuffer,
                                                    size_t bufferSize) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(GrVkFormatIsCompressed(pixelFormat));

    int mipmapLevelCount = 1;
    GrMipmapStatus mipmapStatus = GrMipmapStatus::kNotAllocated;
    if (mipMapped == GrMipmapped::kYes) {
        mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
        mipmapStatus = GrMipmapStatus::kValid;
    }

    sk_sp<GrVkTexture> texture = GrVkTexture::MakeNewTexture(this, budgeted, dimensions,
                                                             pixelFormat, mipmapLevelCount,
                                                             isProtected, mipmapStatus);
    if (!texture) {
        return nullptr;
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(texture->textureImage(), compression, pixelFormat,
                                       dimensions, mipMapped, nativeBuffer, bufferSize)) {
        return nullptr;
    }

    return std::move(texture);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
                         sk_sp<GrGpuBuffer> dstBuffer,
                         VkDeviceSize srcOffset,
                         VkDeviceSize dstOffset,
                         VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;
    this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
                                             &copyRegion);
}

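// Note: this path ultimately records vkCmdUpdateBuffer, which Vulkan restricts to inline
// updates of at most 65536 bytes with 4-byte-aligned offset and size; callers are expected
// to route larger uploads through a staging-buffer copy instead.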
bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    // Update the buffer
    this->currentCommandBuffer()->updateBuffer(this, std::move(buffer), offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

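// Validates a client-supplied GrVkImageInfo before wrapping it: the image handle (and, when we
// are adopting the allocation, its memory) must be non-null, the layout and queue family must be
// ones we can work with, and the usage flags must include the transfer bits we rely on.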
static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != graphicsQueueIndex) {
                return false;
            }
        } else {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }

    // We currently require everything to be made with transfer bits set
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
        !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        return false;
    }

    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        if (!caps.supportsDRMFormatModifiers()) {
            return false;
        }
        // To be technically correct we should query the Vulkan support for each VkFormat and
        // drmFormatModifier pair to confirm the required feature support is there. However, we
        // currently don't have our caps and format tables set up to do this efficiently. So
        // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were
        // set up using valid features (checked below). In practice this should all be safe
        // because currently we are setting all drm format modifier textures to have a
        // GrTextureType::kExternal, so we really just need to be able to read these video
        // VkImages in a shader. The video decoder isn't going to give us VkImages that don't
        // support being sampled.
    } else {
        SkUNREACHABLE;
    }

    // We currently require all textures to be made with sample support
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
        return false;
    }

    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
    if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
        return false;
    }
    if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
    // the wrapped VkImage.
    bool resolveOnly = sampleCnt > 1;
    if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
                                                                   sampleCnt, ownership, cacheable,
                                                                   imageInfo,
                                                                   std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    if (!backendRT.getVkImageInfo(&info)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
        return nullptr;
    }

    // We will always render directly to this VkImage.
    static constexpr bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }

    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
    SkASSERT(mutableState);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
            this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));

    // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
    }

    return std::move(tgt);
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}

bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                                  const GrVkRenderPass& renderPass,
                                  GrAttachment* dst,
                                  GrVkImage* src,
                                  const SkIRect& srcRect) {
    return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
}

bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture\n");
        return false;
    }
    SkASSERT(tex->textureType() == GrTextureType::k2D);

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // change layout of the layers so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Blit the miplevels
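    // Each loop iteration transitions level (mipLevel - 1) from TRANSFER_DST to TRANSFER_SRC
    // with a per-level barrier, then blits it down at half size (linear filter) into level
    // mipLevel, which is still in TRANSFER_DST_OPTIMAL from the whole-image transition above.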
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        this->currentCommandBuffer()->blitImage(this,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                1,
                                                &blitRegion,
                                                VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer, which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                   SkISize dimensions, int numStencilSamples) {
    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected,
                                                GrMemoryless memoryless) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
}

////////////////////////////////////////////////////////////////////////////////

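// Tightly packs each mip level of uncompressed pixel data into the mapped staging memory at the
// precomputed per-level offsets, trimming any row padding from the source pixmaps as it copies.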
bool copy_src_data(char* mapPtr,
                   VkFormat vkFormat,
                   const SkTArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!GrVkFormatIsCompressed(vkFormat));
    SkASSERT(individualMipOffsets.count() == numMipLevels);
    SkASSERT(mapPtr);

    size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].info().width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}

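// Creates the VkImage (and its backing allocation) for a client-visible backend surface. The
// usage flags are derived from texturable/renderable, and every image additionally gets the
// transfer src/dst bits so it can pass the transfer-bit requirement in check_image_info if the
// client later wraps it.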
bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
    if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
        return false;
    }

    if (renderable == GrRenderable::kYes) {
        sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
        if (!sampleCnt) {
            return false;
        }
    }

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable == GrTexturable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    return true;
}

bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                    sk_sp<GrRefCntedCallback> finishedCallback,
                                    std::array<float, 4> color) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture =
                GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
                                                kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
                                                kRW_GrIOType, info, std::move(mutableState));
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }

    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // CmdClearColorImage doesn't work for compressed formats
    SkASSERT(!GrVkFormatIsCompressed(info.fFormat));

    VkClearColorValue vkColor;
    // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
    // uint32 union members in those cases.
    vkColor.float32[0] = color[0];
    vkColor.float32[1] = color[1];
    vkColor.float32[2] = color[2];
    vkColor.float32[3] = color[3];
    VkImageSubresourceRange range;
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseArrayLayer = 0;
    range.baseMipLevel = 0;
    range.layerCount = 1;
    range.levelCount = info.fLevelCount;
    cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);

    // Change the image layout to shader read, since when we use this as a borrowed texture
    // within Ganesh we require that its layout be set to that.
    texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                             VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                             false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipMapped,
                                                 GrProtected isProtected) {
    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!format.asVkFormat(&vkFormat)) {
        return {};
    }

    // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }

    if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipMapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), info);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
        GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
                                        isProtected);
}

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<GrRefCntedCallback> finishedCallback,
                                               const void* data,
                                               size_t size) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
                                                                 backendTexture.dimensions(),
                                                                 kBorrow_GrWrapOwnership,
                                                                 GrWrapCacheable::kNo,
                                                                 kRW_GrIOType,
                                                                 info,
                                                                 std::move(mutableState));
    if (!texture) {
        return false;
    }

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }
    GrVkImage* image = texture->textureImage();
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());

    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    GrStagingBufferManager::Slice slice;

    fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
                               &individualMipOffsets,
                               &slice,
                               compression,
                               info.fFormat,
                               backendTexture.dimensions(),
                               backendTexture.fMipmapped);

    if (!slice.fBuffer) {
        return false;
    }

    memcpy(slice.fOffsetMapPtr, data, size);

    cmdBuffer->addGrSurface(texture);
    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer is coming from the staging manager and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    cmdBuffer->copyBufferToImage(this,
                                 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
                                 image,
                                 image->currentLayout(),
                                 regions.count(),
                                 regions.begin());

    // Change the image layout to shader read, since when we use this as a borrowed texture
    // within Ganesh we require that its layout be set to that.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                          VK_ACCESS_SHADER_READ_BIT,
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
                                             const GrVkSharedImageInfo& newInfo) {
    // Even though internally we use these helpers for getting src access flags and stages, they
    // can also be used for general dst flags, since we don't know exactly what the client plans
    // on using the image for.
    VkImageLayout newLayout = newInfo.getImageLayout();
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        newLayout = image->currentLayout();
    }
    VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
    uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal to have both the new and old queue be special queue families (i.e. external
        // or foreign).
        return;
    }

    image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
                                       newQueueFamilyIndex);
}

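// Shared implementation for setBackendTextureState and setBackendRenderTargetState: wraps the
// client's image in a borrowed GrVkImage so we can reuse our normal layout/queue-family
// transition machinery, optionally reporting the previous state back to the caller.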
bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
                                     sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                     SkISize dimensions,
                                     const GrVkSharedImageInfo& newInfo,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) {
    sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
                                                      dimensions,
                                                      info,
                                                      std::move(currentState),
                                                      GrVkImage::UsageFlags::kColorAttachment,
                                                      kBorrow_GrWrapOwnership,
                                                      GrWrapCacheable::kNo,
                                                      /*forSecondaryCB=*/false);
    SkASSERT(texture);
    if (!texture) {
        return false;
    }
    if (previousState) {
        previousState->setVulkanState(texture->currentLayout(),
                                      texture->currentQueueFamilyIndex());
    }
    set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
                                     const GrBackendSurfaceMutableState& newState,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
                                        newState.fVkState, previousState,
                                        std::move(finishedCallback));
}

bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
                                          const GrBackendSurfaceMutableState& newState,
                                          GrBackendSurfaceMutableState* previousState,
                                          sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState),
                                        backendRenderTarget.dimensions(), newState.fVkState,
                                        previousState, std::move(finishedCallback));
}

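// Inserts a self-dependency barrier on the render target's color attachment so that subsequent
// draws can read pixels written by prior draws: either for non-coherent advanced blends or for
// reading the attachment as an input attachment (texture barrier).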
void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    VkPipelineStageFlags dstStage;
    VkAccessFlags dstAccess;
    if (barrierType == kBlend_GrXferBarrierType) {
        dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
    } else {
        SkASSERT(barrierType == kTexture_GrXferBarrierType);
        dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
    }
    GrVkImage* image = vkRT->colorAttachment();
    VkImageMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = dstAccess;
    barrier.oldLayout = image->currentLayout();
    barrier.newLayout = barrier.oldLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image->image();
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
    this->addImageMemoryBarrier(image->resource(),
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                dstStage, true, &barrier);
}

void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo info;
    if (tex.getVkImageInfo(&info)) {
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
                                                       &attachmentsDescriptor, &attachmentFlags);

    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
        programInfo.colorLoadOp() == GrLoadOp::kLoad) {
        loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
    }
    sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
            &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
    if (!renderPass) {
        return false;
    }

    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
                                    desc,
                                    programInfo,
                                    renderPass->vkRenderPass(),
                                    &stat);
    if (!pipelineState) {
        return false;
    }

    return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
}

#if GR_TEST_UTILS
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo backend;
    if (!tex.getVkImageInfo(&backend)) {
        return false;
    }

    if (backend.fImage && backend.fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend.fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                    GrColorType ct,
                                                                    int sampleCnt,
                                                                    GrProtected isProtected) {
    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }

    VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
                                              GrRenderable::kYes, GrMipmapped::kNo, &info,
                                              isProtected)) {
        return {};
    }
    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
}

void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kVulkan == rt.fBackend);

    GrVkImageInfo info;
    if (rt.getVkImageInfo(&info)) {
        // something in the command buffer may still be using this, so force submit
        SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}
#endif

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
                                     VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kBufferMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    if (!this->currentCommandBuffer()) {
        return;
    }
    // We don't pass a resource to the command buffer here. The command buffer only uses the
    // resource to hold a ref, and every place where we add a buffer memory barrier we are also
    // issuing some other command with the buffer on the command buffer. Those other commands
    // will already cause the command buffer to hold a ref to the buffer.
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  /*resource=*/nullptr,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kBufferMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
                                    VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    // If we are in the middle of destroying or abandoning the context we may hit a release proc
    // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
    // VkImage back to the original queue. In this state we don't submit anymore work and we may not
    // have a current command buffer. Thus we won't do the queue transfer.
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kImageMemory_BarrierType,
                                                  barrier);
}

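// Transitions the given proxies' images for external (client) access before the surrounding
// flush submits the command buffer: either into a present-ready state for the swapchain, or
// into an explicitly requested layout/queue-family state.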
void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrBackendSurfaceMutableState* newState) {
    // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
    // not affect what we do here.
    if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
        // We currently don't support passing in new surface state for multiple proxies here. The
        // only time we have multiple proxies is when flushing a yuv SkImage, which won't have
        // state updates anyway. Additionally, if we have a newState then we must not have any
        // BackendSurfaceAccess.
        SkASSERT(!newState || proxies.size() == 1);
        SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
        GrVkImage* image;
        for (GrSurfaceProxy* proxy : proxies) {
            SkASSERT(proxy->isInstantiated());
            if (GrTexture* tex = proxy->peekTexture()) {
                image = static_cast<GrVkTexture*>(tex)->textureImage();
            } else {
                GrRenderTarget* rt = proxy->peekRenderTarget();
                SkASSERT(rt);
                GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
                image = vkRT->externalAttachment();
            }
            if (newState) {
                const GrVkSharedImageInfo& newInfo = newState->fVkState;
                set_layout_and_queue_from_mutable_state(this, image, newInfo);
            } else {
                SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
                image->prepareForPresent(this);
            }
        }
    }
}

void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
}

void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
}

void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
}

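// When syncCpu is true, the submit forces a CPU sync with the queue and only returns once the
// submitted work has completed; otherwise the command buffer is submitted without waiting.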
bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
    if (syncCpu) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

void GrVkGpu::finishOutstandingGpuWork() {
    VK_CALL(QueueWaitIdle(fQueue));

    if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
        fResourceProvider.forceSyncAllCommandBuffers();
    }
}

void GrVkGpu::onReportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
    uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
    SkASSERT(usedMemory <= allocatedMemory);
    if (allocatedMemory > 0) {
        SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
                                (usedMemory * 100) / allocatedMemory);
    }
    // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
    // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
    SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
#endif
}

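// Copies with vkCmdCopyImage, which requires both images to be in transfer layouts; the caps
// check asserted below guarantees the formats and sample counts are copy-compatible. No format
// conversion or scaling occurs on this path.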
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

#ifdef SK_DEBUG
    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat;
    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
    SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                         srcFormat, srcSampleCnt, srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    // These flags are for flushing/invalidating caches. For the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->copyImage(this,
                                            srcImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                            dstImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                            1,
                                            &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space, so we pass in kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

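// Copies with vkCmdBlitImage. The source and destination rects always have identical dimensions,
// so no scaling occurs and a nearest filter is sufficient.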
void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

#ifdef SK_DEBUG
    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat;
    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
    SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
                                          dstSampleCnt,
                                          dstImage->isLinearTiled(),
                                          dstHasYcbcr,
                                          srcFormat,
                                          srcSampleCnt,
                                          srcImage->isLinearTiled(),
                                          srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // The dst rect has the same dimensions as the src rect, positioned at dstPoint.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
                                        srcRect.height());

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->blitImage(this,
                                            *srcImage,
                                            *dstImage,
                                            1,
                                            &blitRegion,
                                            VK_FILTER_NEAREST); // We never scale, so any filter works here.

    // The rect is already in device space, so we pass in kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

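// Copies by resolving the MSAA source render target into dst; the actual resolve commands are
// recorded by resolveImage() on the current command buffer.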
void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space, so we pass in kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

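// Picks a copy strategy in order of preference: an MSAA resolve, a vkCmdCopyImage copy, then a
// vkCmdBlitImage blit. Returns false if the caps support none of these for the given formats and
// sample counts, so the caller can fall back to a different path (e.g. a draw).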
bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return false;
    }

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        if (vkRT->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        // This will technically return true for single sample rts that used DMSAA, in which case
        // we don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyway.
        if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
            dstImage = vkRT->resolveAttachment();
        } else {
            dstImage = vkRT->colorAttachment();
        }
    } else if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
    } else {
        // The surface is already a GrAttachment.
        dstImage = static_cast<GrVkImage*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        // This will technically return true for single sample rts that used DMSAA, in which case
        // we don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyway.
        if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
            srcImage = vkRT->resolveAttachment();
        } else {
            srcImage = vkRT->colorAttachment();
        }
    } else if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
    } else {
        // The surface is already a GrAttachment.
        srcImage = static_cast<GrVkImage*>(src);
    }

    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat = srcImage->imageFormat();

    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();

    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();

    if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
                                        srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                    srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyAsBlit(dstFormat,
                                     dstSampleCnt,
                                     dstImage->isLinearTiled(),
                                     dstHasYcbcr,
                                     srcFormat,
                                     srcSampleCnt,
                                     srcImage->isLinearTiled(),
                                     srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

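// GPU-to-CPU readback: the image is transitioned to a transfer-source layout, copied into a
// host-mappable transfer buffer, and the command buffer is submitted with a forced CPU sync so
// the buffer can be mapped and its rows copied out.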
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           SkIRect rect,
                           GrColorType surfaceColorType,
                           GrColorType dstColorType,
                           void* buffer,
                           size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        image = rt->nonMSAAAttachment();
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    if (!image) {
        return false;
    }

    if (dstColorType == GrColorType::kUnknown ||
        dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
        return false;
    }

    // Change the layout of our target so it can be used as a copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
        return false;
    }
    size_t tightRowBytes = bpp * rect.width();

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    VkOffset3D offset = { rect.left(), rect.top(), 0 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern);

    if (!transferBuffer) {
        return false;
    }

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the image to a buffer so we can map it to cpu memory.
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    image,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer,
                                                    1,
                                                    &region);

    // Make sure the copy to the buffer has finished before the host reads it.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
    void* mappedMemory = transferBuffer->map();

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());

    transferBuffer->unmap();
    return true;
}

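// The clear array must line up with the render pass's attachment order: color is attachment 0,
// and when a resolve attachment is present the stencil attachment moves from index 1 to index 2.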
bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              sk_sp<const GrVkFramebuffer> framebuffer,
                              const VkClearValue* colorClear,
                              const GrSurface* target,
                              const SkIRect& renderPassBounds,
                              bool forSecondaryCB) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!framebuffer->isExternal());

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
    VkClearValue clears[3];
    int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
    clears[0].color = colorClear->color;
    clears[stencilIndex].depthStencil.depth = 0.0f;
    clears[stencilIndex].depthStencil.stencil = 0;

    return this->currentCommandBuffer()->beginRenderPass(
        this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
}

void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
                            const SkIRect& bounds) {
    // We had a command buffer when we started the render pass, so we should have one now as well.
    SkASSERT(this->currentCommandBuffer());
    this->currentCommandBuffer()->endRenderPass(this);
    this->didWriteToSurface(target, origin, &bounds);
}

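// Central VkResult filter: records device-lost and out-of-memory results on the GrVkGpu so
// higher levels can abandon the context or free resources; all other failures simply return
// false.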
bool GrVkGpu::checkVkResult(VkResult result) {
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
#ifdef SKIA_DFX_FOR_OHOS
            {
                auto context = getContext();
                if (context) {
                    auto cache = context->priv().getResourceCache();
                    if (cache) {
                        auto cacheInfo = cache->cacheInfo();
                        SK_LOGE("GrVkGpu::checkVkResult VK_ERROR_DEVICE_LOST, cacheInfo = %{public}s",
                            cacheInfo.c_str());
                    }
                }
            }
#endif
            fDeviceIsLost = true;
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}

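// OH ISSUE: asks the HUAWEI HPS blur driver extension what output dimensions it would produce
// for the given blur parameters. Returns {0, 0} when the extension entry point is unavailable.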
std::array<int, 2> GrVkGpu::GetHpsDimension(const SkBlurArg& blurArg) const
{
    int width = 0;
    int height = 0;
    VkRect2D srcRegion;
    srcRegion.offset = { blurArg.srcRect.fLeft, blurArg.srcRect.fTop };
    srcRegion.extent = { (uint32_t)blurArg.srcRect.width(), (uint32_t)blurArg.srcRect.height() };

    VkRect2D dstRegion;
    dstRegion.offset = { blurArg.dstRect.fLeft, blurArg.dstRect.fTop };
    dstRegion.extent = { (uint32_t)blurArg.dstRect.width(), (uint32_t)blurArg.dstRect.height() };

    VkDrawBlurImageInfoHUAWEI drawBlurImageInfo {};
    drawBlurImageInfo.sType = VkStructureTypeHUAWEI::VK_STRUCTURE_TYPE_DRAW_BLUR_IMAGE_INFO_HUAWEI;
    drawBlurImageInfo.pNext = nullptr;
    drawBlurImageInfo.sigma = blurArg.sigma;
    drawBlurImageInfo.srcRegion = srcRegion;
    drawBlurImageInfo.dstRegion = dstRegion;
    drawBlurImageInfo.srcImageView = VK_NULL_HANDLE;

    VkRect2D hpsDimension {};
    auto grVkInterface = this->vkInterface();
    if (grVkInterface != nullptr && grVkInterface->fFunctions.fGetBlurImageSizeHUAWEI != nullptr) {
        VK_CALL(GetBlurImageSizeHUAWEI(this->device(), &drawBlurImageInfo, &hpsDimension));
        width = static_cast<int>(hpsDimension.extent.width);
        height = static_cast<int>(hpsDimension.extent.height);
    }

    return { width, height };
}

void GrVkGpu::dumpVmaStats(SkString* out) {
    if (out == nullptr) {
        return;
    }
    out->appendf("dumpVmaCacheStats:\n");
    fMemoryAllocatorCacheImage->dumpVmaStats(out, "\n");
}

// OH ISSUE: async memory reclaimer
void GrVkGpu::setGpuMemoryAsyncReclaimerSwitch(bool enabled)
{
    if (!fMemoryReclaimer) {
        fMemoryReclaimer = std::make_unique<GrVkMemoryReclaimer>();
    }
    fMemoryReclaimer->setGpuMemoryAsyncReclaimerSwitch(enabled);
}

// OH ISSUE: async memory reclaimer
void GrVkGpu::flushGpuMemoryInWaitQueue()
{
    if (fMemoryReclaimer) {
        fMemoryReclaimer->flushGpuMemoryInWaitQueue();
    }
}

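// OH ISSUE: DFX accounting hooks. These forward image/buffer allocation sizes to the resource
// cache, presumably so OHOS memory diagnostics can attribute GPU allocations.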
#ifdef SKIA_DFX_FOR_OHOS
void GrVkGpu::addAllocImageBytes(size_t bytes)
{
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->addAllocImageBytes(bytes);
}

void GrVkGpu::removeAllocImageBytes(size_t bytes)
{
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->removeAllocImageBytes(bytes);
}

void GrVkGpu::addAllocBufferBytes(size_t bytes)
{
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->addAllocBufferBytes(bytes);
}

void GrVkGpu::removeAllocBufferBytes(size_t bytes)
{
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->removeAllocBufferBytes(bytes);
}
#endif

void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

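// Creates a fence and signals it via an empty vkQueueSubmit. The fence signals once all
// previously submitted work on the queue completes, so it marks a point in the queue's timeline.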
GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;
    VkResult result;

    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
    if (result != VK_SUCCESS) {
        return 0;
    }
    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
    if (result != VK_SUCCESS) {
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        return 0;
    }

    static_assert(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

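// Waits with a zero timeout, making this a non-blocking poll: it returns true only if the fence
// has already signaled.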
bool GrVkGpu::waitFence(GrFence fence) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result;
    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                           GrSemaphoreWrapType wrapType,
                                                           GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

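// insertSemaphore and waitSemaphore only queue the semaphore resources; the actual signals and
// waits are attached to the next command buffer submission.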
void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: should we have a way to notify the caller that this has failed? Currently if the
    // submit fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the
    // gpu. Eventually we will abandon the whole GPU if this fails.
    this->submitToGpu(false);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This additionally would also require thread safety in command buffer submissions to
    // queues in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}