1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
#if SK_GPU_V1
#include "src/gpu/ops/SmallPathAtlasMgr.h"
#else
// A vestigial definition for v2 that will never be instantiated.
// It exists so unguarded uses such as fSmallPathAtlasMgr->reset() (see
// abandonContext/freeGpuResources below) still compile in non-v1 builds;
// the asserts fire if one is ever actually created or used.
namespace skgpu::v1 {
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}
#endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62
63 #if GR_TEST_UTILS
64 # include "include/utils/SkRandom.h"
65 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 # include <sanitizer/lsan_interface.h>
67 # endif
68 #endif
69
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71
Next()72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73 static std::atomic<uint32_t> nextID{1};
74 uint32_t id;
75 do {
76 id = nextID.fetch_add(1, std::memory_order_relaxed);
77 } while (id == SK_InvalidUniqueID);
78 return DirectContextID(id);
79 }
80
// Constructs a direct context for `backend`, creating a fresh thread-safe proxy
// and assigning a process-unique DirectContextID. Real setup happens in init().
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
85
// Flushes and submits any pending work, waits for the GPU to finish, then tears
// down caches in dependency order (see the per-statement comments).
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
107
// Returns a ref to the proxy object that may be safely used from other threads
// (delegates entirely to the base class).
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}
111
resetGLTextureBindings()112 void GrDirectContext::resetGLTextureBindings() {
113 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114 return;
115 }
116 fGpu->resetTextureBindings();
117 }
118
// Marks portions of the backend 3D API state dirty so they will be restored before
// the next use. `state` is presumably a GrBackendState bitmask — confirm against
// GrGpu::markContextDirty.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
123
// Abandons the backend 3D API context: GPU-backed objects become unusable and no
// further backend work is issued. Idempotent. Teardown order below matters — see
// the inline comments.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // Must be after GrResourceCache::abandonAll().
    fMappedBufferManager.reset();

    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
153
abandoned()154 bool GrDirectContext::abandoned() {
155 if (INHERITED::abandoned()) {
156 return true;
157 }
158
159 if (fGpu && fGpu->isDeviceLost()) {
160 this->abandonContext();
161 return true;
162 }
163 return false;
164 }
165
oomed()166 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
167
// Like abandonContext(), but first releases resources back to the 3D API
// (GrGpu::DisconnectType::kCleanup rather than kAbandon). Idempotent.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
192
// Flushes pending work, then frees every GPU resource the context can drop
// (atlases, text blobs, strikes, drawing-manager resources, unlocked cache
// entries). The context remains usable afterwards.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge blobs.
    this->getTextBlobCache()->freeAll();
    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
216
// Completes construction once fGpu has been installed by the backend factory.
// Creates the strike cache, resource cache/provider, mapped-buffer manager, task
// group, and glyph atlas manager. Returns false (context unusable) if there is no
// GPU or base-class init fails.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    // Share the caps and pipeline builder with the thread-safe proxy.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    // Register the atlas manager so it participates in every flush.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
275
getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const276 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
277 ASSERT_SINGLE_OWNER
278
279 if (resourceCount) {
280 *resourceCount = fResourceCache->getBudgetedResourceCount();
281 }
282 if (resourceBytes) {
283 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
284 }
285 }
286
// Returns how many bytes of cached GPU resources are currently purgeable.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
291
getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const292 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
293 ASSERT_SINGLE_OWNER
294 if (maxResources) {
295 *maxResources = -1;
296 }
297 if (maxResourceBytes) {
298 *maxResourceBytes = this->getResourceCacheLimit();
299 }
300 }
301
// Returns the byte budget of the GPU resource cache.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
306
// Legacy two-value setter kept for API compatibility: the resource-count limit is
// no longer supported (`unused` is ignored); only the byte limit is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
311
// Sets the byte budget of the GPU resource cache.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
316
// Purges unlocked resources from the cache (only scratch resources when
// `scratchResourcesOnly` is true), trims the cache back under budget, drops stale
// text blobs, and lets the backend release any unlocked backend objects.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
333
// OH variant of purgeUnlockedResources(): delegates the purge policy to
// GrResourceCache::purgeUnlockAndSafeCacheGpuResources, then performs the same
// budget trim / stale-blob purge / backend-object release sequence.
void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockAndSafeCacheGpuResources();
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
350
CalcHpsBluredImageDimension(const SkBlurArg& blurArg)351 std::array<int, 2> GrDirectContext::CalcHpsBluredImageDimension(const SkBlurArg& blurArg) {
352 return fGpu->GetHpsDimension(blurArg);
353 }
354
purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag)355 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag) {
356 ASSERT_SINGLE_OWNER
357 fResourceCache->purgeUnlockedResourcesByTag(scratchResourceseOnly, tag);
358 fResourceCache->purgeAsNeeded();
359
360 // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
361 // place to purge stale blobs
362 this->getTextBlobCache()->purgeStaleBlobs();
363 }
364
// Purges unlocked resources owned by any pid in `exitedPidSet` (only scratch
// resources when `scratchResourcesOnly` is true), then frees all text blobs and
// strikes so typefaces owned by exited processes are dereferenced.
// NOTE(review): unlike the other purge entry points this one has no abandoned()
// guard — confirm whether that is intentional for the process-exit path.
void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
    fResourceCache->purgeAsNeeded();

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge blobs.
    this->getTextBlobCache()->freeAll();
    // The StrikeCache indirectly references typeface, and in order to dereference the typeface,
    // it is necessary to clear the StrikeCache when the application exits.
    fStrikeCache->freeAll();
}
377
// OH ISSUE: between-frame purge. Delegates the policy (which pids to skip or
// target) to GrResourceCache::purgeCacheBetweenFrames.
void GrDirectContext::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
                                              const std::set<int>& protectedPidSet)
{
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
}
389
// Periodic housekeeping: completes finished async work, returns mapped buffers,
// and purges resources that have sat unused for at least `msNotUsed` (only
// scratch resources when `scratchResourcesOnly` is true).
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything not used since this time point is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
411
purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources)412 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
413 ASSERT_SINGLE_OWNER
414
415 if (this->abandoned()) {
416 return;
417 }
418
419 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
420 }
421
422 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[], bool deleteSemaphoresAfterWait)423 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
424 bool deleteSemaphoresAfterWait) {
425 if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
426 return false;
427 }
428 GrWrapOwnership ownership =
429 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
430 for (int i = 0; i < numSemaphores; ++i) {
431 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
432 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
433 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
434 // to begin with. Therefore, it is fine to not wait on it.
435 if (sema) {
436 fGpu->waitSemaphore(sema.get());
437 }
438 }
439 return true;
440 }
441
// Lazily creates the small-path atlas manager (registering it as an on-flush
// callback) and (re)initializes its atlas on every call; returns nullptr when the
// atlas cannot be initialized. In non-v1 builds the whole body compiles out and
// the never-created manager pointer (null) is returned.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // initAtlas is checked on every call, not just at creation — it can fail later.
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}
457
458 ////////////////////////////////////////////////////////////////////////////////
459
// Flushes all pending work to the backend. On an abandoned context the client's
// finished/submitted callbacks are still invoked (submitted with success=false) so
// callers can release their callback contexts.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
475
submit(bool syncCpu)476 bool GrDirectContext::submit(bool syncCpu) {
477 ASSERT_SINGLE_OWNER
478 if (this->abandoned()) {
479 return false;
480 }
481
482 if (!fGpu) {
483 return false;
484 }
485
486 return fGpu->submitToGpu(syncCpu);
487 }
488
489 ////////////////////////////////////////////////////////////////////////////////
490
checkAsyncWorkCompletion()491 void GrDirectContext::checkAsyncWorkCompletion() {
492 if (fGpu) {
493 fGpu->checkFinishProcs();
494 }
495 }
496
syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned)497 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
498 if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
499 fGpu->finishOutstandingGpuWork();
500 this->checkAsyncWorkCompletion();
501 }
502 }
503
504 ////////////////////////////////////////////////////////////////////////////////
505
// Asks the backend to persist its Vulkan pipeline cache (no-op for other backends
// — the virtual on GrGpu decides).
void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}
511
512 ////////////////////////////////////////////////////////////////////////////////
513
// Whether the shader caps allow distance-field text rendering on this context.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
517
518 //////////////////////////////////////////////////////////////////////////////
519
// Dumps resource-cache memory statistics plus the text-blob cache footprint into
// the client-supplied trace dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
526
// Tag-filtered variant of dumpMemoryStatistics(): only resources carrying `tag`
// are dumped; the text-blob cache line is always reported.
void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
533
setCurrentGrResourceTag(const GrGpuResourceTag& tag)534 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
535 if (fResourceCache) {
536 return fResourceCache->setCurrentGrResourceTag(tag);
537 }
538 }
539
popGrResourceTag()540 void GrDirectContext::popGrResourceTag()
541 {
542 if (fResourceCache) {
543 return fResourceCache->popGrResourceTag();
544 }
545 }
546
getCurrentGrResourceTag() const547 GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
548 if (fResourceCache) {
549 return fResourceCache->getCurrentGrResourceTag();
550 }
551 return {};
552 }
releaseByTag(const GrGpuResourceTag& tag)553 void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
554 if (fResourceCache) {
555 fResourceCache->releaseByTag(tag);
556 }
557 }
getAllGrGpuResourceTags() const558 std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
559 if (fResourceCache) {
560 return fResourceCache->getAllGrGpuResourceTags();
561 }
562 return {};
563 }
564
565 // OH ISSUE: get the memory information of the updated pid.
getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)566 void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
567 {
568 if (fResourceCache) {
569 fResourceCache->getUpdatedMemoryMap(out);
570 }
571 }
572
573 // OH ISSUE: init gpu memory limit.
initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)574 void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
575 {
576 if (fResourceCache) {
577 fResourceCache->initGpuMemoryLimit(callback, size);
578 }
579 }
580
581 // OH ISSUE: check whether the PID is abnormal.
isPidAbnormal() const582 bool GrDirectContext::isPidAbnormal() const
583 {
584 if (fResourceCache) {
585 return fResourceCache->isPidAbnormal();
586 }
587 return false;
588 }
589
vmaDefragment()590 void GrDirectContext::vmaDefragment()
591 {
592 if (fGpu) {
593 fGpu->vmaDefragment();
594 }
595 }
596
dumpVmaStats(SkString *out)597 void GrDirectContext::dumpVmaStats(SkString *out)
598 {
599 if (out == nullptr) {
600 return;
601 }
602 if (fGpu) {
603 fGpu->dumpVmaStats(out);
604 }
605 }
606
// OH ISSUE: intra frame and inter frame identification
// Marks the start of a frame for the resource cache. Only compiled in when
// SK_VULKAN is defined; a no-op otherwise.
void GrDirectContext::beginFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->beginFrame();
    }
#endif
}
616
// OH ISSUE: intra frame and inter frame identification
// Marks the end of a frame for the resource cache. Only compiled in when
// SK_VULKAN is defined; a no-op otherwise.
void GrDirectContext::endFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->endFrame();
    }
#endif
}
626
// OH ISSUE: asyn memory reclaimer
// Toggles the backend's asynchronous GPU-memory reclaimer. Only compiled in when
// SK_VULKAN is defined; a no-op otherwise.
void GrDirectContext::setGpuMemoryAsyncReclaimerSwitch(bool enabled)
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->setGpuMemoryAsyncReclaimerSwitch(enabled);
    }
#endif
}
636
// OH ISSUE: asyn memory reclaimer
// Flushes memory queued for asynchronous reclamation in the backend. Only
// compiled in when SK_VULKAN is defined; a no-op otherwise.
void GrDirectContext::flushGpuMemoryInWaitQueue()
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->flushGpuMemoryInWaitQueue();
    }
#endif
}
646
// OH ISSUE: suppress release window
// Toggles the resource cache's suppress-release-window behavior. Only compiled in
// when SK_VULKAN is defined; a no-op otherwise or on an abandoned context.
void GrDirectContext::setGpuCacheSuppressWindowSwitch(bool enabled)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->setGpuCacheSuppressWindowSwitch(enabled);
#endif
}
660
// OH ISSUE: suppress release window
// Lets the resource cache trim itself below its target ratio, aborting early when
// `nextFrameHasArrived` reports true. Only compiled in when SK_VULKAN is defined;
// a no-op otherwise or on an abandoned context.
void GrDirectContext::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}
674
// Creates a backend texture with the given explicit format; no pixel data is
// uploaded. Returns an invalid GrBackendTexture on an abandoned context.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
688
// Convenience overload: maps `skColorType` to this context's default backend
// format for that type, then defers to the explicit-format overload above.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}
702
create_and_clear_backend_texture(GrDirectContext* dContext, SkISize dimensions, const GrBackendFormat& backendFormat, GrMipmapped mipMapped, GrRenderable renderable, GrProtected isProtected, sk_sp<GrRefCntedCallback> finishedCallback, std::array<float, 4> color)703 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
704 SkISize dimensions,
705 const GrBackendFormat& backendFormat,
706 GrMipmapped mipMapped,
707 GrRenderable renderable,
708 GrProtected isProtected,
709 sk_sp<GrRefCntedCallback> finishedCallback,
710 std::array<float, 4> color) {
711 GrGpu* gpu = dContext->priv().getGpu();
712 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
713 mipMapped, isProtected);
714 if (!beTex.isValid()) {
715 return {};
716 }
717
718 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
719 std::move(finishedCallback),
720 color)) {
721 dContext->deleteBackendTexture(beTex);
722 return {};
723 }
724 return beTex;
725 }
726
// Uploads `numLevels` pixmap mip levels into an existing backend texture by
// wrapping it in a borrowed proxy, writing pixels through a SurfaceContext, and
// flushing that surface. Returns false if the pixmap color type is incompatible
// with the texture's format, the wrap fails, or the write fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrowed wrap: the client keeps ownership of the backend texture; the
    // finished callback fires when the GPU is done reading the staged uploads.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // 15 inline slots cover every realistic mip chain (2^15 max dimension).
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
768
// Creates a backend texture with the given format, cleared to `color`.
// `finishedProc` is invoked once the GPU has consumed the clear — including on the
// abandoned-context early-out, via the callback's destructor.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
793
// Creates a backend texture for `skColorType`, cleared to `color`. The clear color
// is run through the format's write swizzle so the stored bytes match what a
// normal write of `color` would produce.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}
825
// Creates a backend texture initialized from `srcData` (level 0 sets the
// dimensions and color type; more than one level implies a mipmapped texture) and
// uploads all provided levels. On upload failure the freshly created texture is
// deleted and an invalid texture returned.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
873
// Clears an existing backend texture to `color` with no color-type swizzling
// (compare the SkColorType overload below). Returns false on an abandoned context
// or a failed clear; `finishedProc` always fires eventually via the callback.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
886
// Clears an existing backend texture to `color`, interpreting the texture via
// `skColorType`: the color is run through the format's write swizzle first.
// Returns false when the color type is incompatible with the texture's format.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
912
// Rewrites the contents of an existing backend texture from `srcData`. A
// mipmapped texture must be updated in full: `numLevels` has to equal the complete
// mip chain length computed from the texture's dimensions, else this fails.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
945
946 //////////////////////////////////////////////////////////////////////////////
947
create_and_update_compressed_backend_texture( GrDirectContext* dContext, SkISize dimensions, const GrBackendFormat& backendFormat, GrMipmapped mipMapped, GrProtected isProtected, sk_sp<GrRefCntedCallback> finishedCallback, const void* data, size_t size)948 static GrBackendTexture create_and_update_compressed_backend_texture(
949 GrDirectContext* dContext,
950 SkISize dimensions,
951 const GrBackendFormat& backendFormat,
952 GrMipmapped mipMapped,
953 GrProtected isProtected,
954 sk_sp<GrRefCntedCallback> finishedCallback,
955 const void* data,
956 size_t size) {
957 GrGpu* gpu = dContext->priv().getGpu();
958
959 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
960 mipMapped, isProtected);
961 if (!beTex.isValid()) {
962 return {};
963 }
964
965 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
966 beTex, std::move(finishedCallback), data, size)) {
967 dContext->deleteBackendTexture(beTex);
968 return {};
969 }
970 return beTex;
971 }
972
createCompressedBackendTexture( int width, int height, const GrBackendFormat& backendFormat, const SkColor4f& color, GrMipmapped mipmapped, GrProtected isProtected, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)973 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
974 int width, int height,
975 const GrBackendFormat& backendFormat,
976 const SkColor4f& color,
977 GrMipmapped mipmapped,
978 GrProtected isProtected,
979 GrGpuFinishedProc finishedProc,
980 GrGpuFinishedContext finishedContext) {
981 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
982 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
983
984 if (this->abandoned()) {
985 return {};
986 }
987
988 SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
989 if (compression == SkImage::CompressionType::kNone) {
990 return {};
991 }
992
993 size_t size = SkCompressedDataSize(compression,
994 {width, height},
995 nullptr,
996 mipmapped == GrMipmapped::kYes);
997 auto storage = std::make_unique<char[]>(size);
998 GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
999 return create_and_update_compressed_backend_texture(this,
1000 {width, height},
1001 backendFormat,
1002 mipmapped,
1003 isProtected,
1004 std::move(finishedCallback),
1005 storage.get(),
1006 size);
1007 }
1008
createCompressedBackendTexture( int width, int height, SkImage::CompressionType compression, const SkColor4f& color, GrMipmapped mipMapped, GrProtected isProtected, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1009 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1010 int width, int height,
1011 SkImage::CompressionType compression,
1012 const SkColor4f& color,
1013 GrMipmapped mipMapped,
1014 GrProtected isProtected,
1015 GrGpuFinishedProc finishedProc,
1016 GrGpuFinishedContext finishedContext) {
1017 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1018 GrBackendFormat format = this->compressedBackendFormat(compression);
1019 return this->createCompressedBackendTexture(width, height, format, color,
1020 mipMapped, isProtected, finishedProc,
1021 finishedContext);
1022 }
1023
createCompressedBackendTexture( int width, int height, const GrBackendFormat& backendFormat, const void* compressedData, size_t dataSize, GrMipmapped mipMapped, GrProtected isProtected, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1024 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1025 int width, int height,
1026 const GrBackendFormat& backendFormat,
1027 const void* compressedData,
1028 size_t dataSize,
1029 GrMipmapped mipMapped,
1030 GrProtected isProtected,
1031 GrGpuFinishedProc finishedProc,
1032 GrGpuFinishedContext finishedContext) {
1033 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1034 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1035
1036 if (this->abandoned()) {
1037 return {};
1038 }
1039
1040 return create_and_update_compressed_backend_texture(this,
1041 {width, height},
1042 backendFormat,
1043 mipMapped,
1044 isProtected,
1045 std::move(finishedCallback),
1046 compressedData,
1047 dataSize);
1048 }
1049
createCompressedBackendTexture( int width, int height, SkImage::CompressionType compression, const void* data, size_t dataSize, GrMipmapped mipMapped, GrProtected isProtected, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1050 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1051 int width, int height,
1052 SkImage::CompressionType compression,
1053 const void* data, size_t dataSize,
1054 GrMipmapped mipMapped,
1055 GrProtected isProtected,
1056 GrGpuFinishedProc finishedProc,
1057 GrGpuFinishedContext finishedContext) {
1058 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1059 GrBackendFormat format = this->compressedBackendFormat(compression);
1060 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
1061 isProtected, finishedProc, finishedContext);
1062 }
1063
updateCompressedBackendTexture(const GrBackendTexture& backendTexture, const SkColor4f& color, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1064 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1065 const SkColor4f& color,
1066 GrGpuFinishedProc finishedProc,
1067 GrGpuFinishedContext finishedContext) {
1068 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1069
1070 if (this->abandoned()) {
1071 return false;
1072 }
1073
1074 SkImage::CompressionType compression =
1075 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1076 if (compression == SkImage::CompressionType::kNone) {
1077 return {};
1078 }
1079 size_t size = SkCompressedDataSize(compression,
1080 backendTexture.dimensions(),
1081 nullptr,
1082 backendTexture.hasMipmaps());
1083 SkAutoMalloc storage(size);
1084 GrFillInCompressedData(compression,
1085 backendTexture.dimensions(),
1086 backendTexture.mipmapped(),
1087 static_cast<char*>(storage.get()),
1088 color);
1089 return fGpu->updateCompressedBackendTexture(backendTexture,
1090 std::move(finishedCallback),
1091 storage.get(),
1092 size);
1093 }
1094
updateCompressedBackendTexture(const GrBackendTexture& backendTexture, const void* compressedData, size_t dataSize, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1095 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1096 const void* compressedData,
1097 size_t dataSize,
1098 GrGpuFinishedProc finishedProc,
1099 GrGpuFinishedContext finishedContext) {
1100 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1101
1102 if (this->abandoned()) {
1103 return false;
1104 }
1105
1106 if (!compressedData) {
1107 return false;
1108 }
1109
1110 return fGpu->updateCompressedBackendTexture(backendTexture,
1111 std::move(finishedCallback),
1112 compressedData,
1113 dataSize);
1114 }
1115
1116 //////////////////////////////////////////////////////////////////////////////
1117
setBackendTextureState(const GrBackendTexture& backendTexture, const GrBackendSurfaceMutableState& state, GrBackendSurfaceMutableState* previousState, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1118 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1119 const GrBackendSurfaceMutableState& state,
1120 GrBackendSurfaceMutableState* previousState,
1121 GrGpuFinishedProc finishedProc,
1122 GrGpuFinishedContext finishedContext) {
1123 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1124
1125 if (this->abandoned()) {
1126 return false;
1127 }
1128
1129 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1130 }
1131
1132
setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget, const GrBackendSurfaceMutableState& state, GrBackendSurfaceMutableState* previousState, GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)1133 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1134 const GrBackendSurfaceMutableState& state,
1135 GrBackendSurfaceMutableState* previousState,
1136 GrGpuFinishedProc finishedProc,
1137 GrGpuFinishedContext finishedContext) {
1138 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1139
1140 if (this->abandoned()) {
1141 return false;
1142 }
1143
1144 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1145 std::move(callback));
1146 }
1147
deleteBackendTexture(GrBackendTexture backendTex)1148 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1149 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1150 // For the Vulkan backend we still must destroy the backend texture when the context is
1151 // abandoned.
1152 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1153 return;
1154 }
1155
1156 fGpu->deleteBackendTexture(backendTex);
1157 }
1158
1159 //////////////////////////////////////////////////////////////////////////////
1160
// Hands a cached shader (lookup key plus previously-captured shader data) to the
// backend for ahead-of-time compilation; returns the backend's success report.
// NOTE(review): unlike the other public entry points here there is no abandoned()
// guard -- presumably callers only invoke this on a live context; confirm.
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
1164
1165 #ifdef SK_ENABLE_DUMP_GPU
1166 #include "include/core/SkString.h"
1167 #include "src/utils/SkJSONWriter.h"
// Serializes the context's debug state (backend name, caps, GPU, and context JSON)
// into a single pretty-printed JSON string.
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendString("backend", GrBackendApiToStr(this->backend()));

    // Each subsystem writes its own JSON subtree under the named key.
    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.writable_str());
    return result;
}
1196 #endif
1197
1198 #ifdef SK_GL
1199
1200 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)1201 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1202 GrContextOptions defaultOptions;
1203 return MakeGL(std::move(glInterface), defaultOptions);
1204 }
1205
MakeGL(const GrContextOptions& options)1206 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1207 return MakeGL(nullptr, options);
1208 }
1209
MakeGL()1210 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1211 GrContextOptions defaultOptions;
1212 return MakeGL(nullptr, defaultOptions);
1213 }
1214
1215 #if GR_TEST_UTILS
make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original)1216 GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
1217 // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
1218 // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
1219 // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
1220 // on the thing it captures. So we leak the context.
1221 struct GetErrorContext {
1222 SkRandom fRandom;
1223 GrGLFunction<GrGLGetErrorFn> fGetError;
1224 };
1225
1226 auto errorContext = new GetErrorContext;
1227
1228 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
1229 __lsan_ignore_object(errorContext);
1230 #endif
1231
1232 errorContext->fGetError = original;
1233
1234 return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
1235 GrGLenum error = errorContext->fGetError();
1236 if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
1237 error = GR_GL_OUT_OF_MEMORY;
1238 }
1239 return error;
1240 });
1241 }
1242 #endif
1243
1244 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
1245 const GrContextOptions& options) {
1246 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
1247 #if GR_TEST_UTILS
1248 if (options.fRandomGLOOM) {
1249 auto copy = sk_make_sp<GrGLInterface>(*glInterface);
1250 copy->fFunctions.fGetError =
1251 make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
1252 #if GR_GL_CHECK_ERROR
1253 // Suppress logging GL errors since we'll be synthetically generating them.
1254 copy->suppressErrorLogging();
1255 #endif
1256 glInterface = std::move(copy);
1257 }
1258 #endif
1259 direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
1260 if (!direct->init()) {
1261 return nullptr;
1262 }
1263 return direct;
1264 }
1265 #endif
1266
1267 /*************************************************************************************************/
1268 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1269 GrContextOptions defaultOptions;
1270 return MakeMock(mockOptions, defaultOptions);
1271 }
1272
1273 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1274 const GrContextOptions& options) {
1275 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1276
1277 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1278 if (!direct->init()) {
1279 return nullptr;
1280 }
1281
1282 return direct;
1283 }
1284
1285 #ifdef SK_VULKAN
1286 /*************************************************************************************************/
1287 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1288 GrContextOptions defaultOptions;
1289 return MakeVulkan(backendContext, defaultOptions);
1290 }
1291
1292 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1293 const GrContextOptions& options) {
1294 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1295
1296 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1297 if (!direct->init()) {
1298 return nullptr;
1299 }
1300
1301 return direct;
1302 }
1303 #endif
1304
1305 #ifdef SK_METAL
1306 /*************************************************************************************************/
1307 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1308 GrContextOptions defaultOptions;
1309 return MakeMetal(backendContext, defaultOptions);
1310 }
1311
1312 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1313 const GrContextOptions& options) {
1314 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1315
1316 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1317 if (!direct->init()) {
1318 return nullptr;
1319 }
1320
1321 return direct;
1322 }
1323
1324 // deprecated
1325 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1326 GrContextOptions defaultOptions;
1327 return MakeMetal(device, queue, defaultOptions);
1328 }
1329
1330 // deprecated
1331 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
1332 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1333 const GrContextOptions& options) {
1334 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1335 GrMtlBackendContext backendContext = {};
1336 backendContext.fDevice.reset(device);
1337 backendContext.fQueue.reset(queue);
1338
1339 return GrDirectContext::MakeMetal(backendContext, options);
1340 }
1341 #endif
1342
1343 #ifdef SK_DIRECT3D
1344 /*************************************************************************************************/
1345 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1346 GrContextOptions defaultOptions;
1347 return MakeDirect3D(backendContext, defaultOptions);
1348 }
1349
1350 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1351 const GrContextOptions& options) {
1352 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1353
1354 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1355 if (!direct->init()) {
1356 return nullptr;
1357 }
1358
1359 return direct;
1360 }
1361 #endif
1362
1363 #ifdef SK_DAWN
1364 /*************************************************************************************************/
1365 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1366 GrContextOptions defaultOptions;
1367 return MakeDawn(device, defaultOptions);
1368 }
1369
1370 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1371 const GrContextOptions& options) {
1372 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1373
1374 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1375 if (!direct->init()) {
1376 return nullptr;
1377 }
1378
1379 return direct;
1380 }
1381
1382 #endif
1383