/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include <ctime>
#include <vector>
#include <map>
#include <sstream>
#ifdef NOT_BUILD_FOR_OHOS_SDK
#include <parameters.h>
#endif
#include "include/core/SkString.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}

inline void GrResourceCache::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

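// The async-free tuning knobs below (the per-frame purge time budget and the max bytes ratio)
// are read once from OHOS system parameters when available; otherwise the hard-coded defaults
// (600 and 0.9) apply.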
GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
#ifdef NOT_BUILD_FOR_OHOS_SDK
    static int overtimeDuration = std::atoi(
            OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_overtime", "600")
                    .c_str());
    static double maxBytesRate = std::atof(
            OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_max_rate", "0.9")
                    .c_str());
#else
    static int overtimeDuration = 600;
    static double maxBytesRate = 0.9;
#endif
    fMaxBytesRate = maxBytesRate;
    fOvertimeDuration = overtimeDuration;
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

#ifdef SKIA_DFX_FOR_OHOS
static constexpr int MB = 1024 * 1024;

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
bool GrResourceCache::purgeUnlocakedResTraceEnabled_ =
    std::atoi((OHOS::system::GetParameter("sys.graphic.skia.cache.debug", "0").c_str())) == 1;
#endif

void GrResourceCache::dumpInfo(SkString* out) {
    if (out == nullptr) {
        SkDebugf("OHOS GrResourceCache::dumpInfo outPtr is nullptr!");
        return;
    }
    auto info = cacheInfo();
    constexpr uint8_t STEP_INDEX = 1;
    SkTArray<SkString> lines;
    SkStrSplit(info.substr(STEP_INDEX, info.length() - STEP_INDEX).c_str(), ";", &lines);
    for (int i = 0; i < lines.size(); ++i) {
        out->appendf("    %s\n", lines[i].c_str());
    }
}

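// Builds a single semicolon-separated summary of the cache state: budgeted/actual bytes,
// purgeable and non-purgeable breakdowns, real allocation sizes, and per-pid byte counts.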
std::string GrResourceCache::cacheInfo()
{
    auto fPurgeableQueueInfoStr = cacheInfoPurgeableQueue();
    auto fNonpurgeableResourcesInfoStr = cacheInfoNoPurgeableQueue();
    size_t fRealAllocBytes = cacheInfoRealAllocSize();
    auto fRealAllocInfoStr = cacheInfoRealAllocQueue();
    auto fRealBytesOfPidInfoStr = realBytesOfPid();

    std::ostringstream cacheInfoStream;
    cacheInfoStream << "[fPurgeableQueueInfoStr.count : " << fPurgeableQueue.count()
        << "; fNonpurgeableResources.count : " << fNonpurgeableResources.count()
        << "; fBudgetedBytes : " << fBudgetedBytes
        << "(" << static_cast<size_t>(fBudgetedBytes / MB)
        << " MB) / " << fMaxBytes
        << "(" << static_cast<size_t>(fMaxBytes / MB)
        << " MB); fBudgetedCount : " << fBudgetedCount
        << "; fBytes : " << fBytes
        << "(" << static_cast<size_t>(fBytes / MB)
        << " MB); fPurgeableBytes : " << fPurgeableBytes
        << "(" << static_cast<size_t>(fPurgeableBytes / MB)
        << " MB); fAllocImageBytes : " << fAllocImageBytes
        << "(" << static_cast<size_t>(fAllocImageBytes / MB)
        << " MB); fAllocBufferBytes : " << fAllocBufferBytes
        << "(" << static_cast<size_t>(fAllocBufferBytes / MB)
        << " MB); fRealAllocBytes : " << fRealAllocBytes
        << "(" << static_cast<size_t>(fRealAllocBytes / MB)
        << " MB); fTimestamp : " << fTimestamp
        << "; " << fPurgeableQueueInfoStr << "; " << fNonpurgeableResourcesInfoStr
        << "; " << fRealAllocInfoStr << "; " << fRealBytesOfPidInfoStr;
    return cacheInfoStream.str();
}

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
void GrResourceCache::traceBeforePurgeUnlockRes(const std::string& method, SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        StartTrace(HITRACE_TAG_GRAPHIC_AGP, method + " begin cacheInfo = " + cacheInfo());
    } else {
        simpleCacheInfo.fPurgeableQueueCount = fPurgeableQueue.count();
        simpleCacheInfo.fNonpurgeableResourcesCount = fNonpurgeableResources.count();
        simpleCacheInfo.fPurgeableBytes = fPurgeableBytes;
        simpleCacheInfo.fBudgetedCount = fBudgetedCount;
        simpleCacheInfo.fBudgetedBytes = fBudgetedBytes;
        simpleCacheInfo.fAllocImageBytes = fAllocImageBytes;
        simpleCacheInfo.fAllocBufferBytes = fAllocBufferBytes;
    }
}

void GrResourceCache::traceAfterPurgeUnlockRes(const std::string& method, const SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s", method.c_str(), cacheInfo().c_str());
        FinishTrace(HITRACE_TAG_GRAPHIC_AGP);
    } else {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s",
            method.c_str(), cacheInfoComparison(simpleCacheInfo).c_str());
    }
}

std::string GrResourceCache::cacheInfoComparison(const SimpleCacheInfo& simpleCacheInfo)
{
    std::ostringstream cacheInfoComparison;
    cacheInfoComparison << "PurgeableCount : " << simpleCacheInfo.fPurgeableQueueCount
        << " / " << fPurgeableQueue.count()
        << "; NonpurgeableCount : " << simpleCacheInfo.fNonpurgeableResourcesCount
        << " / " << fNonpurgeableResources.count()
        << "; PurgeableBytes : " << simpleCacheInfo.fPurgeableBytes << " / " << fPurgeableBytes
        << "; BudgetedCount : " << simpleCacheInfo.fBudgetedCount << " / " << fBudgetedCount
        << "; BudgetedBytes : " << simpleCacheInfo.fBudgetedBytes << " / " << fBudgetedBytes
        << "; AllocImageBytes : " << simpleCacheInfo.fAllocImageBytes << " / " << fAllocImageBytes
        << "; AllocBufferBytes : " << simpleCacheInfo.fAllocBufferBytes << " / " << fAllocBufferBytes;
    return cacheInfoComparison.str();
}
#endif // SKIA_OHOS_FOR_OHOS_TRACE

std::string GrResourceCache::cacheInfoPurgeableQueue()
{
    std::map<uint32_t, int> purgSizeInfoWid;
    std::map<uint32_t, int> purgCountInfoWid;
    std::map<uint32_t, std::string> purgNameInfoWid;
    std::map<uint32_t, int> purgPidInfoWid;

    std::map<uint32_t, int> purgSizeInfoPid;
    std::map<uint32_t, int> purgCountInfoPid;
    std::map<uint32_t, std::string> purgNameInfoPid;

    std::map<uint32_t, int> purgSizeInfoFid;
    std::map<uint32_t, int> purgCountInfoFid;
    std::map<uint32_t, std::string> purgNameInfoFid;

    int purgCountUnknown = 0;
    int purgSizeUnknown = 0;

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
        } else {
            purgCountUnknown++;
            purgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (purgSizeInfoWid.size() > 0) {
        infoStr += ";PurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
    }
    if (purgSizeInfoPid.size() > 0) {
        infoStr += ";PurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
    }
    if (purgSizeInfoFid.size() > 0) {
        infoStr += ";PurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";PurgeableInfo_Unknown:", purgCountUnknown, purgSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::cacheInfoNoPurgeableQueue()
{
    std::map<uint32_t, int> noPurgSizeInfoWid;
    std::map<uint32_t, int> noPurgCountInfoWid;
    std::map<uint32_t, std::string> noPurgNameInfoWid;
    std::map<uint32_t, int> noPurgPidInfoWid;

    std::map<uint32_t, int> noPurgSizeInfoPid;
    std::map<uint32_t, int> noPurgCountInfoPid;
    std::map<uint32_t, std::string> noPurgNameInfoPid;

    std::map<uint32_t, int> noPurgSizeInfoFid;
    std::map<uint32_t, int> noPurgCountInfoFid;
    std::map<uint32_t, std::string> noPurgNameInfoFid;

    int noPurgCountUnknown = 0;
    int noPurgSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
        } else {
            noPurgCountUnknown++;
            noPurgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (noPurgSizeInfoWid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
    }
    if (noPurgSizeInfoPid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
    }
    if (noPurgSizeInfoFid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";NonPurgeableInfo_Unknown:", noPurgCountUnknown, noPurgSizeUnknown);
    return infoStr;
}

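// Sums getRealAllocSize() over every resource, purgeable or not, that reports isRealAlloc().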
size_t GrResourceCache::cacheInfoRealAllocSize()
{
    size_t realAllocImageSize = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    return realAllocImageSize;
}

std::string GrResourceCache::cacheInfoRealAllocQueue()
{
    std::map<uint32_t, std::string> realAllocNameInfoWid;
    std::map<uint32_t, int> realAllocSizeInfoWid;
    std::map<uint32_t, int> realAllocPidInfoWid;
    std::map<uint32_t, int> realAllocCountInfoWid;

    std::map<uint32_t, std::string> realAllocNameInfoPid;
    std::map<uint32_t, int> realAllocSizeInfoPid;
    std::map<uint32_t, int> realAllocCountInfoPid;

    std::map<uint32_t, std::string> realAllocNameInfoFid;
    std::map<uint32_t, int> realAllocSizeInfoFid;
    std::map<uint32_t, int> realAllocCountInfoFid;

    int realAllocCountUnknown = 0;
    int realAllocSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    std::string infoStr;
    if (realAllocSizeInfoWid.size() > 0) {
        infoStr += ";RealAllocInfo_Node:[";
        updatePurgeableWidInfo(infoStr, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
    }
    if (realAllocSizeInfoPid.size() > 0) {
        infoStr += ";RealAllocInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
    }
    if (realAllocSizeInfoFid.size() > 0) {
        infoStr += ";RealAllocInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";RealAllocInfo_Unknown:", realAllocCountUnknown, realAllocSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::realBytesOfPid()
{
    std::string infoStr;
    infoStr += ";fBytesOfPid : [";
    if (fBytesOfPid.size() > 0) {
        for (auto it = fBytesOfPid.begin(); it != fBytesOfPid.end(); it++) {
            infoStr += std::to_string(it->first) + ":" + std::to_string(it->second) + ", ";
        }
    }
    infoStr += "]";
    return infoStr;
}

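// The helpers below aggregate per-resource sizes and counts into maps keyed by the resource
// tag's fWid, fPid or fFid field, and then format those maps into the dump strings above.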
void GrResourceCache::updatePurgeableWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, int>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + resource->gpuMemorySize();
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = resource->gpuMemorySize();
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updatePurgeablePidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, int>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + resource->gpuMemorySize();
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = resource->gpuMemorySize();
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updatePurgeableFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, int>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + resource->gpuMemorySize();
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = resource->gpuMemorySize();
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updateRealAllocWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, int>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + size;
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = size;
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updateRealAllocPidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, int>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + size;
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = size;
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updateRealAllocFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, int>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + size;
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = size;
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updatePurgeableWidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoWid,
                                             std::map<uint32_t, int>& sizeInfoWid,
                                             std::map<uint32_t, int>& pidInfoWid,
                                             std::map<uint32_t, int>& countInfoWid)
{
    for (auto it = sizeInfoWid.begin(); it != sizeInfoWid.end(); it++) {
        infoStr += "[" + nameInfoWid[it->first] +
            ",pid=" + std::to_string(pidInfoWid[it->first]) +
            ",NodeId=" + std::to_string(it->first & 0xFFFFFFFF) +
            ",count=" + std::to_string(countInfoWid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeablePidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoPid,
                                             std::map<uint32_t, int>& sizeInfoPid,
                                             std::map<uint32_t, int>& countInfoPid)
{
    for (auto it = sizeInfoPid.begin(); it != sizeInfoPid.end(); it++) {
        infoStr += "[" + nameInfoPid[it->first] +
            ",pid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoPid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableFidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoFid,
                                             std::map<uint32_t, int>& sizeInfoFid,
                                             std::map<uint32_t, int>& countInfoFid)
{
    for (auto it = sizeInfoFid.begin(); it != sizeInfoFid.end(); it++) {
        infoStr += "[" + nameInfoFid[it->first] +
            ",typeid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoFid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableUnknownInfo(
    std::string& infoStr, const std::string& unknownPrefix, const int countUnknown, const int sizeUnknown)
{
    if (countUnknown > 0) {
        infoStr += unknownPrefix +
            "[count=" + std::to_string(countUnknown) +
            ",size=" + std::to_string(sizeUnknown) +
            "(" + std::to_string(sizeUnknown / MB) + "MB)]";
    }
}
#endif

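// Adds a resource to the cache: stamps it with the next timestamp, places it in the
// non-purgeable array, updates byte and per-pid accounting, and purges as needed when the
// budget is exceeded.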
void GrResourceCache::insertResource(GrGpuResource* resource)
{
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());
    if (!resource || this->isInCache(resource) || resource->wasDestroyed() || resource->resourcePriv().isPurgeable()) {
        SkDebugf("OHOS GrResourceCache::insertResource resource is invalid!!!");
        return;
    }
    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize += resource->getRealAllocSize();
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize >= fMemoryControl_ && fExitedPid_.find(pid) == fExitedPid_.end() && fMemoryOverflowCallback_) {
            fMemoryOverflowCallback_(pid, pidSize, true);
            fExitedPid_.insert(pid);
            SkDebugf("OHOS resource overflow! pid[%{public}d], size[%{public}zu]", pid, pidSize);
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
            HITRACE_OHOS_NAME_FMT_ALWAYS("OHOS gpu resource overflow: pid(%u), size:(%u)", pid, pidSize);
#endif
        }
    }

#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    if (fBudgetedBytes >= fMaxBytes) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("cache over fBudgetedBytes:(%u), fMaxBytes:(%u)", fBudgetedBytes, fMaxBytes);
#ifdef SKIA_DFX_FOR_OHOS
        SimpleCacheInfo simpleCacheInfo;
        traceBeforePurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
        this->purgeAsNeeded();
#ifdef SKIA_DFX_FOR_OHOS
        traceAfterPurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
    } else {
        this->purgeAsNeeded();
    }
#else
    this->purgeAsNeeded();
#endif
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable() && this->isInPurgeableCache(resource)) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else if (this->isInNonpurgeableCache(resource)) {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize -= resource->getRealAllocSize();
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(pid);
        }
    }

    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
    AutoValidate av(this);
    this->processFreedGpuResources();
    SkASSERT(fProxyProvider); // better have called setProxyProvider
    std::vector<GrGpuResource*> recycleVector;
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (auto resource : recycleVector) {
        SkASSERT(!resource->wasDestroyed());
        resource->cacheAccess().release();
    }
}

void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (tag.isGrTagValid()) {
        grResourceTagCacheStack.push(tag);
        return;
    }
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

void GrResourceCache::popGrResourceTag()
{
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
    if (grResourceTagCacheStack.empty()) {
        return {};
    }
    return grResourceTagCacheStack.top();
}

std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
    std::set<GrGpuResourceTag> result;
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        auto tag = fNonpurgeableResources[i]->getResourceTag();
        result.insert(tag);
    }
    return result;
}

// OH ISSUE: get the memory information of the updated pid.
void GrResourceCache::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    fUpdatedBytesOfPid.swap(out);
}

// OH ISSUE: init gpu memory limit.
void GrResourceCache::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fMemoryOverflowCallback_ == nullptr) {
        fMemoryOverflowCallback_ = callback;
        fMemoryControl_ = size;
    }
}

// OH ISSUE: check whether the PID is abnormal.
bool GrResourceCache::isPidAbnormal() const
{
    return fExitedPid_.find(getCurrentGrResourceTag().fPid) != fExitedPid_.end();
}

// OH ISSUE: change the fbyte when the resource tag changes.
void GrResourceCache::changeByteOfPid(int32_t beforePid, int32_t afterPid, size_t bytes)
{
    if (beforePid) {
        auto& pidSize = fBytesOfPid[beforePid];
        pidSize -= bytes;
        fUpdatedBytesOfPid[beforePid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(beforePid);
        }
    }
    if (afterPid) {
        auto& size = fBytesOfPid[afterPid];
        size += bytes;
        fUpdatedBytesOfPid[afterPid] = size;
    }
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};

GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        if (!this->isInCache(resource)) {
            SkDebugf("OHOS GrResourceCache::findAndRefScratchResource not in cache, return!!!");
            return nullptr;
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

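// Installs newKey on the resource, first evicting or stripping the key from any other resource
// that currently holds it; an invalid newKey simply removes the resource's existing unique key.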
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        if (this->isInPurgeableCache(resource)) {
            fPurgeableBytes -= resource->gpuMemorySize();
            fPurgeableQueue.remove(resource);
        }
        if (!this->isInNonpurgeableCache(resource)) {
            this->addToNonpurgeableArray(resource);
        } else {
            SkDebugf("OHOS resource in isInNonpurgeableCache, do not add again!");
        }
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

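// Called when the last ref of a given type is dropped. Decides whether the resource stays in
// the non-purgeable array, moves to the purgeable queue, is re-budgeted as scratch, or is
// released outright.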
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
    if (!resource || resource->wasDestroyed() || this->isInPurgeableCache(resource) ||
        !this->isInNonpurgeableCache(resource)) {
        SkDebugf("OHOS GrResourceCache::notifyARefCntReachedZero return!");
        return;
    }
    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

static constexpr int timeUnit = 1000;

// OH ISSUE: allow access to release interface
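// Purging between frames is throttled: when no frame is in flight, purging stops as soon as the
// next frame has arrived; during a frame, purging stops once the elapsed time since the frame
// started exceeds fOvertimeDuration (both computed in microseconds below).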
1187bool GrResourceCache::allowToPurge(const std::function<bool(void)>& nextFrameHasArrived)
1188{
1189    if (!fEnabled) {
1190        return true;
1191    }
1192    if (fFrameInfo.duringFrame == 0) {
1193        if (nextFrameHasArrived && nextFrameHasArrived()) {
1194            return false;
1195        }
1196        return true;
1197    }
1198    if (fFrameInfo.frameCount != fLastFrameCount) { // the next frame arrives
1199        struct timespec startTime = {0, 0};
1200        if (clock_gettime(CLOCK_REALTIME, &startTime) == -1) {
1201            return true;
1202        }
1203        fStartTime = startTime.tv_sec * timeUnit * timeUnit + startTime.tv_nsec / timeUnit;
1204        fLastFrameCount = fFrameInfo.frameCount;
1205        return true;
1206    }
1207    struct timespec endTime = {0, 0};
1208    if (clock_gettime(CLOCK_REALTIME, &endTime) == -1) {
1209        return true;
1210    }
1211    if (((endTime.tv_sec * timeUnit * timeUnit + endTime.tv_nsec / timeUnit) - fStartTime) >= fOvertimeDuration) {
1212        return false;
1213    }
1214    return true;
1215}
1216
1217void GrResourceCache::purgeAsNeeded(const std::function<bool(void)>& nextFrameHasArrived) {
1218    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
1219    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
1220    if (invalidKeyMsgs.count()) {
1221        SkASSERT(fProxyProvider);
1222
1223        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
1224            if (invalidKeyMsgs[i].inThreadSafeCache()) {
1225                fThreadSafeCache->remove(invalidKeyMsgs[i].key());
1226                SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
1227            } else {
1228                fProxyProvider->processInvalidUniqueKey(
1229                                                    invalidKeyMsgs[i].key(), nullptr,
1230                                                    GrProxyProvider::InvalidateGPUResource::kYes);
1231                SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
1232            }
1233        }
1234    }
1235
1236    this->processFreedGpuResources();
1237
1238    bool stillOverbudget = this->overBudget(nextFrameHasArrived);
1239    while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1240        GrGpuResource* resource = fPurgeableQueue.peek();
1241        if (!resource->resourcePriv().isPurgeable()) {
1242            SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable");
1243            continue;
1244        }
1245        SkASSERT(resource->resourcePriv().isPurgeable());
1246        resource->cacheAccess().release();
1247        stillOverbudget = this->overBudget(nextFrameHasArrived);
1248    }
1249
1250    if (stillOverbudget) {
1251        fThreadSafeCache->dropUniqueRefs(this);
1252
1253        stillOverbudget = this->overBudget(nextFrameHasArrived);
1254        while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1255            GrGpuResource* resource = fPurgeableQueue.peek();
1256            if (!resource->resourcePriv().isPurgeable()) {
1257                SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable after dropUniqueRefs");
1258                continue;
1259            }
1260            SkASSERT(resource->resourcePriv().isPurgeable());
1261            resource->cacheAccess().release();
1262            stillOverbudget = this->overBudget(nextFrameHasArrived);
1263        }
1264    }
1265
1266    this->validate();
1267}
1268
1269void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
1270                                             bool scratchResourcesOnly) {
1271#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1272    SimpleCacheInfo simpleCacheInfo;
1273    traceBeforePurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1274#endif
1275    if (!scratchResourcesOnly) {
1276        if (purgeTime) {
1277            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
1278        } else {
1279            fThreadSafeCache->dropUniqueRefs(nullptr);
1280        }
1281
1282        // We could disable maintaining the heap property here, but it would add a lot of
1283        // complexity. Moreover, this is rarely called.
1284        while (fPurgeableQueue.count()) {
1285            GrGpuResource* resource = fPurgeableQueue.peek();
1286
1287            const GrStdSteadyClock::time_point resourceTime =
1288                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
1289            if (purgeTime && resourceTime >= *purgeTime) {
1290                // Resources were given both LRU timestamps and tagged with a frame number when
1291                // they first became purgeable. The LRU timestamp won't change again until the
1292                // resource is made non-purgeable again. So, at this point all the remaining
1293                // resources in the timestamp-sorted queue will have a frame number >= to this
1294                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
            traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
#endif
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
    traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
#endif
}

void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
    SimpleCacheInfo simpleCacheInfo;
    traceBeforePurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
#endif
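    // Drop any uniquely held refs in the thread-safe cache, then release every purgeable
    // resource that has no unique key (i.e. scratch resources). The queue is sorted first and
    // the releases are done in a separate pass so the queue's ordering is preserved.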
    fThreadSafeCache->dropUniqueRefs(nullptr);
    // Sort the queue
    fPurgeableQueue.sort();

    // Make a list of the scratch resources to delete
    SkTDArray<GrGpuResource*> scratchResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (!resource) {
            continue;
        }
        SkASSERT(resource->resourcePriv().isPurgeable());
        if (!resource->getUniqueKey().isValid()) {
            *scratchResources.append() = resource;
        }
    }

    // Delete the scratch resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue.
    for (int i = 0; i < scratchResources.count(); i++) {
        scratchResources.getAt(i)->cacheAccess().release();
    }

    this->validate();
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
    traceAfterPurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
#endif
}

// OH ISSUE: suppress release window
void GrResourceCache::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived) {
    if (!fEnabled) {
        return;
    }
    this->purgeAsNeeded(nextFrameHasArrived);
}

void GrResourceCache::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
        const std::set<int>& protectedPidSet) {
    HITRACE_OHOS_NAME_FMT_ALWAYS("PurgeGrResourceCache cur=%zu, limit=%zu", fBudgetedBytes, fMaxBytes);
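    // Releases at most one purgeable resource per call: prefer a resource owned by an exited
    // pid; otherwise, once budgeted usage reaches the soft limit, release the least recently
    // used resource whose pid is not protected (restricted to scratch resources when
    // scratchResourcesOnly is set).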
    if (exitedPidSet.size() > 1) {
        for (int i = 1; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (exitedPidSet.find(resource->getResourceTag().fPid) != exitedPidSet.end()) {
                resource->cacheAccess().release();
                this->validate();
                return;
            }
        }
    }
    fPurgeableQueue.sort();
#ifdef NOT_BUILD_FOR_OHOS_SDK
    const char* softLimitPercentage = "0.9";
    static size_t softLimit = static_cast<size_t>(std::atof(OHOS::system::GetParameter(
            "persist.sys.graphic.mem.soft_limit", softLimitPercentage).c_str()) * fMaxBytes);
#else
    static size_t softLimit = static_cast<size_t>(0.9 * fMaxBytes);
#endif
    if (fBudgetedBytes >= softLimit) {
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (protectedPidSet.find(resource->getResourceTag().fPid) == protectedPidSet.end()
                && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
                resource->cacheAccess().release();
                this->validate();
                return;
            }
        }
    }
}

void GrResourceCache::purgeUnlockedResourcesByPid(bool scratchResourceOnly, const std::set<int>& exitedPidSet) {
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
    SimpleCacheInfo simpleCacheInfo;
    traceBeforePurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
#endif
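    // Two-pass purge: collect purgeable resources owned by an exited pid, plus purgeable scratch
    // resources (no unique key), then release them so the sorted queue is not disturbed while
    // iterating. Finally, forget the exited pids that have now been cleaned up.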
    // Sort the queue
    fPurgeableQueue.sort();

    // Make lists of the resources that need to be purged
    fThreadSafeCache->dropUniqueRefs(nullptr);
    SkTDArray<GrGpuResource*> exitPidResources;
    SkTDArray<GrGpuResource*> scratchResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (!resource) {
            continue;
        }
        SkASSERT(resource->resourcePriv().isPurgeable());
        if (exitedPidSet.count(resource->getResourceTag().fPid)) {
            *exitPidResources.append() = resource;
        } else if (!resource->getUniqueKey().isValid()) {
            *scratchResources.append() = resource;
        }
    }

    // Delete the exited-pid and scratch resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue.
    for (int i = 0; i < exitPidResources.count(); i++) {
        exitPidResources.getAt(i)->cacheAccess().release();
    }
    for (int i = 0; i < scratchResources.count(); i++) {
        scratchResources.getAt(i)->cacheAccess().release();
    }

    for (auto pid : exitedPidSet) {
        fExitedPid_.erase(pid);
    }

    this->validate();
#if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
    traceAfterPurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
#endif
}

void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
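    // Release every purgeable resource whose tag matches; when scratchResourcesOnly is set,
    // restrict the purge to resources without a unique key.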
    // Sort the queue
    fPurgeableQueue.sort();

    // Make a list of the scratch resources to delete
    SkTDArray<GrGpuResource*> scratchResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        SkASSERT(resource->resourcePriv().isPurgeable());
        if (tag.filter(resource->getResourceTag()) && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
            *scratchResources.append() = resource;
        }
    }

    // Delete the scratch resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue.
    for (int i = 0; i < scratchResources.count(); i++) {
        scratchResources.getAt(i)->cacheAccess().release();
    }

    this->validate();
}

bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

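    // Walk the LRU-sorted purgeable queue, subtracting each budgeted resource's size from the
    // projected budget until the requested headroom would fit under fMaxBytes.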
    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}

void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

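    // Compute a temporary budget that sits bytesToPurge below current usage. Optionally try to
    // reach it by releasing scratch resources first; if that is not enough, temporarily lower
    // fMaxBytes and let purgeAsNeeded() purge in plain LRU order.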
    // Guard against underflow if the caller asks to purge more than is currently cached.
    const size_t tmpByteBudget = fBytes >= bytesToPurge ? fBytes - bytesToPurge : 0;
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

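// A flush is only worth requesting when the cache is over budget, nothing is currently
// purgeable, and flushing would make at least one budgeted resource purgeable.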
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
    texture->ref();
    uint32_t id = texture->uniqueID().asUInt();
    if (auto* data = fTexturesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fTexturesAwaitingUnref.set(id, {texture});
    }
}

void GrResourceCache::processFreedGpuResources() {
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

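    // Drain the freed-texture message inbox and apply one unref per message, dropping the
    // bookkeeping entry once all of a texture's pending unrefs have been delivered.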
    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    SkTDArray<GrGpuResource*> resources;
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        *resources.append() = fNonpurgeableResources[i];
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        *resources.append() = fPurgeableQueue.at(i);
    }
    for (int i = 0; i < resources.count(); i++) {
        auto resource = resources.getAt(i);
        if (!resource || resource->wasDestroyed()) {
            continue;
        }
        resource->dumpMemoryStatistics(traceMemoryDump);
    }
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
            fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
            fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
        }
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if GR_TEST_UTILS
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif // GR_TEST_UTILS
#endif // GR_CACHE_STATS

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
#endif // SK_DEBUG

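// A resource stores the index it occupies in whichever array currently holds it; a negative
// index means it is not in the cache (see the SkDEBUGFAIL messages below).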
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

bool GrResourceCache::isInPurgeableCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
    return false;
}

bool GrResourceCache::isInNonpurgeableCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#if GR_TEST_UTILS

int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
    int count = 0;
    fUniqueHash.foreach([&](const GrGpuResource& resource){
        if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
            ++count;
        }
    });
    return count;
}

void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}

#endif // GR_TEST_UTILS
