/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/vk/GrVkCaps.h"
#include "src/gpu/vk/GrVkMSAALoadManager.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkMemoryReclaimer.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

class GrVkGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
    GrVkMemoryAllocator* memoryAllocatorCacheImage() const {
        return fMemoryAllocatorCacheImage.get();
    }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) override;

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

    static void AsyncFreeVMAMemoryBetweenFrames(std::function<bool(void)> nextFrameHasArrived);

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;
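
    // Example (illustrative sketch, not part of this class): how a caller might fill in a
    // VkImageMemoryBarrier for a layout transition before handing it to addImageMemoryBarrier().
    // `gpu`, `resource`, and `image` are hypothetical stand-ins for real objects.
    //
    //     VkImageMemoryBarrier barrier = {};
    //     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    //     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     barrier.image = image;
    //     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //     gpu->addImageMemoryBarrier(resource,
    //                                VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    //                                /*byRegion=*/false, &barrier);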

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
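
    // Example (illustrative): the fence API above follows a simple insert/wait/delete
    // lifecycle. `gpu` is a hypothetical GrVkGpu*; real callers reach this through
    // GrDirectContext rather than calling the backend directly.
    //
    //     GrFence fence = gpu->insertFence();   // associated with the work submitted so far
    //     if (gpu->waitFence(fence)) {
    //         // the GPU has finished the work covered by the fence
    //     }
    //     gpu->deleteFence(fence);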

    // These match the definitions in SkDrawable, from whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
    // Vulkan call (and GrVkMemoryAllocator call that returns VkResult) made on behalf of the
    // GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
    bool checkVkResult(VkResult);
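
    // Example (illustrative): the call pattern the comment above asks for. Skia routes Vulkan
    // calls through GrVkInterface (see the GR_VK_CALL* macros in GrVkUtil.h); conceptually the
    // check looks like this, with `gpu`, `createInfo`, and `sampler` as hypothetical names:
    //
    //     VkResult result = vkCreateSampler(gpu->device(), &createInfo, nullptr, &sampler);
    //     if (!gpu->checkVkResult(result)) {
    //         // an OOM or lost device has been recorded on the GrVkGpu; abandon the operation
    //     }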

    std::array<int, 2> GetHpsDimension(const SkBlurArg& blurArg) const override;

    void vmaDefragment() override { fMemoryAllocatorCacheImage->vmaDefragment(); }
    void dumpVmaStats(SkString* out) override;

    // OH ISSUE: async memory reclaimer
    void setGpuMemoryAsyncReclaimerSwitch(bool enabled) override;
    void flushGpuMemoryInWaitQueue() override;
    GrVkMemoryReclaimer* memoryReclaimer() const { return fMemoryReclaimer.get(); }

#ifdef SKIA_DFX_FOR_OHOS
    void addAllocImageBytes(size_t bytes);
    void removeAllocImageBytes(size_t bytes);
    void addAllocBufferBytes(size_t bytes);
    void removeAllocBufferBytes(size_t bytes);
#endif

private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*, const GrVkBackendContext&, const sk_sp<GrVkCaps> caps,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion,
            sk_sp<GrVkMemoryAllocator>, sk_sp<GrVkMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                SkISize dimensions,
                                const GrVkSharedImageInfo& newInfo,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               OH_NativeBuffer* nativeBuffer,
                                               size_t bufferSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);
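
    // Illustrative sketch (not the actual implementation) of the submission pattern described
    // above: the accumulated wait/signal semaphores become part of the VkSubmitInfo. All local
    // names here are hypothetical.
    //
    //     VkSubmitInfo submitInfo = {};
    //     submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    //     submitInfo.waitSemaphoreCount = waitCount;      // from fSemaphoresToWaitOn
    //     submitInfo.pWaitSemaphores = waitSemaphores;
    //     submitInfo.pWaitDstStageMask = waitStageMasks;  // one stage mask per wait semaphore
    //     submitInfo.commandBufferCount = 1;
    //     submitInfo.pCommandBuffers = &primaryCmdBuffer;
    //     submitInfo.signalSemaphoreCount = signalCount;  // from fSemaphoresToSignal
    //     submitInfo.pSignalSemaphores = signalSemaphores;
    //     vkQueueSubmit(queue, 1, &submitInfo, fence);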

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // helpers for onCreateTexture and writeTexturePixels
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType colorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType colorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 const void* data, size_t dataSize);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 OH_NativeBuffer* nativeBuffer, size_t bufferSize);

    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        GrMipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const GrVkInterface> fInterface;
    sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkMemoryAllocator> fMemoryAllocatorCacheImage;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue;  // must be a graphics queue
    uint32_t fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // just a raw pointer; the object's lifespan is managed by fMainCmdPool
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // We need a bool to track whether or not we've already disconnected all the gpu resources
    // from the Vulkan context.
    bool fDisconnected;

    GrProtected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    std::unique_ptr<GrVkMemoryReclaimer> fMemoryReclaimer;

    using INHERITED = GrGpu;
};

#endif
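
// Example usage (illustrative, not part of this header): a GrVkGpu is created indirectly by
// handing a GrVkBackendContext to GrDirectContext; client code does not construct it directly.
// All of the Vulkan handles below are assumed to have been created by the application.
//
//     GrVkBackendContext backendContext;
//     backendContext.fInstance = instance;
//     backendContext.fPhysicalDevice = physicalDevice;
//     backendContext.fDevice = device;
//     backendContext.fQueue = graphicsQueue;            // must be a graphics queue
//     backendContext.fGraphicsQueueIndex = queueFamilyIndex;
//     backendContext.fGetProc = getProc;                // loader for instance/device functions
//     sk_sp<GrDirectContext> context = GrDirectContext::MakeVulkan(backendContext);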