/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include <array>
#include <chrono>
#include <functional>
#include <set>
#include <unordered_map>

#include "include/gpu/GrRecordingContext.h"
#include "include/gpu/GrBackendSurface.h"
#include "src/gpu/GrGpuResource.h"

// We shouldn't need this, but currently Android relies on it being included transitively.
#include "include/core/SkUnPreMultiply.h"
#include "include/core/SkBlurTypes.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrClientMappedBufferManager;
class GrDirectContextPriv;
class GrContextThreadSafeProxy;
struct GrD3DBackendContext;
class GrFragmentProcessor;
class GrGpu;
struct GrGLInterface;
struct GrMtlBackendContext;
struct GrMockOptions;
class GrPath;
class GrResourceCache;
class GrResourceProvider;
class GrStrikeCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkString;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

// OH ISSUE: callback type used for GPU memory overflow protection (see initGpuMemoryLimit).
using MemoryOverflowCalllback = std::function<void(int32_t, size_t, bool)>;

namespace skgpu { namespace v1 { class SmallPathAtlasMgr; }}

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_GL
    /**
     * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
     * result of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL();
#endif
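
    /**
     * Usage sketch (illustrative, not part of the API): create a GL-backed context once a native
     * GL context has been made current on the calling thread. Success depends on the platform and
     * driver, so the result should always be checked.
     *
     *   sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
     *   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(iface);  // or simply MakeGL()
     *   if (!ctx) {
     *       // No usable GL interface was found; fall back to CPU rendering.
     *   }
     */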

#ifdef SK_VULKAN
    /**
     * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
     * GrDirectContext is destroyed. This also means that any objects created with this
     * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold
     * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
     * then it is safe to delete the Vulkan objects.
     */
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
#endif

#ifdef SK_METAL
    /**
     * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains an
     * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
     * have their own ref which will be released when the GrMtlBackendContext is destroyed.
     * Ganesh will take its own ref on the objects which will be released when the GrDirectContext
     * is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
    /**
     * Deprecated.
     *
     * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
     * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
     * must have a ref on them that can be transferred to Ganesh, which will release the ref
     * when the GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
#endif

#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

#ifdef SK_DAWN
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
                                           const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend it. This call should not
     * be made frequently, for good performance.
     * The flag bits, 'state', depend on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the context has modified the bound texture will have texture id 0 bound. This does not
     * flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * context usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * context and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects must
     * live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device lost state.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned or if the backend specific context has
     * gotten into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
     * reset and will return false until another out-of-memory error is reported by the 3D API. If
     * the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
     * therefore hide the error from Skia. Also, it is not advised to use this in combination with
     * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
     * checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();

    /**
     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost and
     * the context will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, will be set to -1.
     *  @param maxResourceBytes If non-null, returns the maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /** DEPRECATED
     *  Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     *  limit, it will be purged (LRU) to keep the cache within the limit.
     *
     *  @param maxResources Unused.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     *  Specify the GPU resource cache limit. If the cache currently exceeds this limit,
     *  it will be purged (LRU) to keep the cache within the limit.
     *
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);
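
    /**
     * Usage sketch (illustrative only): query current cache usage and tighten the budget, e.g. in
     * response to a low-memory signal. The 64 MiB figure is an arbitrary example value.
     *
     *   int resourceCount = 0;
     *   size_t resourceBytes = 0;
     *   ctx->getResourceCacheUsage(&resourceCount, &resourceBytes);
     *   if (resourceBytes > 64 * 1024 * 1024) {
     *       ctx->setResourceCacheLimit(64 * 1024 * 1024);  // purges LRU resources to fit
     *   }
     */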

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     *
     * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will
     * be purged but the unlocked resources with persistent data will remain. If
     * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
     * purged.
     *
     * @param msNotUsed              Only unlocked resources not used in the last 'msNotUsed'
     *                               milliseconds will be cleaned up.
     * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                bool scratchResourcesOnly = false);
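
    /**
     * Usage sketch (illustrative only): a client might call this periodically from its frame loop
     * to drop resources that have sat idle. The 5-second threshold is an arbitrary example.
     *
     *   ctx->performDeferredCleanup(std::chrono::milliseconds(5000));
     */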

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
    void purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag);
    void purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet);
    void purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
        const std::set<int>& protectedPidSet);
    void purgeUnlockAndSafeCacheGpuResources();

    std::array<int, 2> CalcHpsBluredImageDimension(const SkBlurArg& blurArg);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged prior
     *                               to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);

    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can an SkImage be created with the given color type.
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Can an SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. If this call returns false, then the GPU back-end
     * will not wait on any passed in semaphores, and the client will still own the semaphores,
     * regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
     * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
     * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
     * flush calls.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);
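
    /**
     * Usage sketch (illustrative only, Vulkan shown): make Skia wait on an externally signaled
     * semaphore before executing further GPU work. 'vkSem' is assumed to be a VkSemaphore owned
     * and kept alive by the client.
     *
     *   GrBackendSemaphore sem;
     *   sem.initVulkan(vkSem);
     *   bool queued = ctx->wait(1, &sem, false);  // false: the client keeps ownership of vkSem
     */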

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
     * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
     * GrContext::submit(syncCpu).
     */
    void flushAndSubmit(bool syncCpu = false) {
        this->flush(GrFlushInfo());
        this->submit(syncCpu);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend API's corresponding command
     *         buffer or encoder objects. However, these objects are not sent to the gpu until a
     *         submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     * submitted to the gpu during the next submit call (it is possible Skia failed to create a
     * subset of the semaphores). The client should not wait on these semaphores until after submit
     * has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
     * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
     * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
     * client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
     * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
     * take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush({}); }

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have been
     * submitted to the GPU and they can safely be waited on. The caller should wait on those
     * semaphores or perform some other global synchronization before deleting the semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will not
     * try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If the syncCpu flag is true this function will return once the gpu has finished with all
     * submitted work.
     */
    bool submit(bool syncCpu = false);
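
    /**
     * Usage sketch (illustrative only): the common end-of-frame pattern is a flush followed by a
     * submit. Passing syncCpu=true blocks until the GPU finishes, which is mainly useful for
     * teardown or testing rather than per-frame use.
     *
     *   ctx->flush(GrFlushInfo());
     *   if (!ctx->submit()) {
     *       // Submission failed (e.g. device lost); semaphores from the flush were not queued.
     *   }
     */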

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
    void dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
     *
     * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
     * before deleting the context used to create them. If the backend is Vulkan, the textures must
     * be deleted before abandoning the context as well. Additionally, clients should only delete
     * these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses
     * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
     * surface, rewrapping it in an image and drawing the image will require explicit
     * synchronization on the client's part).
     */

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);
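
    /**
     * Usage sketch (illustrative only): create an uninitialized, renderable backend texture and
     * release it through the same context once the client is done with it.
     *
     *   GrBackendTexture tex = ctx->createBackendTexture(256, 256,
     *                                                     kRGBA_8888_SkColorType,
     *                                                     GrMipmapped::kNo,
     *                                                     GrRenderable::kYes);
     *   if (tex.isValid()) {
     *       // ... wrap it in an SkSurface or SkImage, render, etc. ...
     *       ctx->deleteBackendTexture(tex);
     *   }
     */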

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
     * client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
     * client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
     * when this call returns.
     * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
     * the data for all the mipmap levels must be provided. In the mipmapped case all the
     * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
     * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrSurfaceOrigin textureOrigin,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                          finishedProc, finishedContext);
    }

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData,
                                          1,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client should
     * check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was successful.
     * The client can pass in a finishedProc to be notified when the data has been uploaded by the
     * gpu and the texture can be deleted. The client is required to call `submit` to send
     * the upload work to the gpu. The finishedProc will always get called even if we failed to
     * update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
     * means that the backend format is compatible with the base pixmap's colortype. The src data
     * can be deleted when this call returns.
     * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
     * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
     * pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
        return this->updateBackendTexture(texture,
                                          srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a compressed backend texture initialized with the provided raw data. The
     * client should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
     * the data for all the mipmap levels must be provided. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If a mipMapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces and
     * SkImages) will also be aware of this state change. This call does not submit the state change
     * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
     * for this call is ordered linearly with all other calls that require GrContext::submit to be
     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
     * called with finishedContext after the state transition is known to have occurred on the GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
     * tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);
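
    /**
     * Usage sketch (illustrative only, Vulkan shown): transition a wrapped swapchain render target
     * to the present layout before handing it back to the swapchain. 'presentQueueIndex' is
     * assumed to be the client's present queue family index.
     *
     *   GrBackendSurfaceMutableState presentState(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
     *                                             presentQueueIndex);
     *   ctx->setBackendRenderTargetState(backendRT, presentState);
     *   ctx->flush(GrFlushInfo());
     *   ctx->submit();
     */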

    void deleteBackendTexture(GrBackendTexture);

    // This interface allows clients to pre-compile shaders and populate the runtime program cache.
    // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
    //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    //
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    //
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 is created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);
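
    /**
     * Usage sketch (illustrative only): at startup, feed previously captured SkSL key/data pairs
     * back into the context. 'loadCachedShaderBlobs()' is a hypothetical client-side helper that
     * returns the pairs saved from the PersistentCache in step #2 as sk_sp<SkData>.
     *
     *   for (const auto& [key, data] : loadCachedShaderBlobs()) {
     *       ctx->precompileShader(*key, *data);
     *   }
     */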

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID Next();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}
        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

    /**
     * Set the current resource tag for GPU cache recycling.
     */
    void setCurrentGrResourceTag(const GrGpuResourceTag& tag);

    /**
     * Pop the current resource tag.
     */
    void popGrResourceTag();

    /**
     * Get the current resource tag for GPU cache recycling.
     *
     * @return the current GrGpuResourceTag.
     */
    GrGpuResourceTag getCurrentGrResourceTag() const;

    /**
     * Releases GrGpuResource objects and removes them from the cache by tag.
     */
    void releaseByTag(const GrGpuResourceTag& tag);

    /**
     * Get all GrGpuResource tags.
     *
     * @return all GrGpuResourceTags.
     */
    std::set<GrGpuResourceTag> getAllGrGpuResourceTags() const;

    void vmaDefragment();
    void dumpVmaStats(SkString* out);

    // OH ISSUE: get the memory information of the updated pid.
    void getUpdatedMemoryMap(std::unordered_map<int32_t, size_t>& out);
    // OH ISSUE: init gpu memory limit.
    void initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);
    // OH ISSUE: check whether the PID is abnormal.
    bool isPidAbnormal() const override;

    // OH ISSUE: intra-frame and inter-frame identification
    void beginFrame();
    void endFrame();

    // OH ISSUE: async memory reclaimer
    void setGpuMemoryAsyncReclaimerSwitch(bool enabled);
    void flushGpuMemoryInWaitQueue();

    // OH ISSUE: suppress release window
    void setGpuCacheSuppressWindowSwitch(bool enabled);
    void suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived);

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing down or
    // abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
    // are abandoning the context we don't want the client to be able to use the GrDirectContext to
    // issue more commands during the callback. Thus before calling this function we set the
    // GrDirectContext's state to be abandoned. However, we need to be able to get past the
    // abandoned check in the call to know that it is safe to execute this. The
    // shouldExecuteWhileAbandoned bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID                   fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup>            fTaskGroup;
    std::unique_ptr<GrStrikeCache>          fStrikeCache;
    sk_sp<GrGpu>                            fGpu;
    std::unique_ptr<GrResourceCache>        fResourceCache;
    std::unique_ptr<GrResourceProvider>     fResourceProvider;

    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache*      fPersistentCache;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};

#endif