/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"

#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

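// Adds a usage interval for 'proxy' spanning [start, end]. If the proxy already has an
// interval, the existing interval's end and use count are updated in place; otherwise a new
// interval is created and tracked in both the sorted interval list and the hash keyed by
// proxy ID.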
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or be a lazy proxy that we instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

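// Returns true if 'proxy' is a texture proxy that targets the user cache. Such proxies look
// for an existing surface even when the caps disallow general scratch reuse (see Register's
// constructor below).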
static bool user_cache_proxy(GrSurfaceProxy* proxy) {
    GrTextureProxy* texProxy = proxy->asTextureProxy();
    if (texProxy) {
        return texProxy->getUserCacheTarget();
    }
    return false;
}

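// A Register stands in for a GrSurface while assignment is being planned. At construction we
// eagerly search for an existing surface: a scratch texture when the proxy has a valid scratch
// key, or a uniquely-keyed surface otherwise. If neither is found, the surface is created
// later, in instantiateSurface().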
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        GrScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {  // check the member; the 'scratchKey' parameter was moved from
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy) ||
            user_cache_proxy(originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(fScratchKey);
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

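// A register may return to the free pool only when scratch textures are reusable for this
// proxy, the register has a scratch key but no unique key, and no refs on the proxy are held
// outside of the uses already known to the allocator.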
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount) const {
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

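// Instantiates 'proxy' with this register's surface: either the pre-existing surface found at
// construction, a surface newly created for the originating proxy, or the originating proxy's
// surface when a later proxy shares the register.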
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (SkBudgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey(), resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    GrScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

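    // Any register whose scratch key matches will do; the filter accepts all candidates.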
    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

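// Walks the intervals in increasing-start order, expiring intervals that have ended and
// planning a register for each proxy that needs one. Fully-lazy proxies are instantiated
// immediately since their dimensions aren't known until their callbacks run.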
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
             r->uniqueID(),
             cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

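// Sums the GPU memory the planned-but-uninstantiated surfaces will require and asks the
// resource cache to purge enough resources to make room for them within the budget. Registers
// that will reuse an existing surface don't add to the total, and shared registers are only
// counted once.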
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (SkBudgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

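// Instantiates each proxy, in increasing-start order, with its register's surface or via its
// lazy callback. The failure flag is sticky; instantiation stops at the first failure and the
// caller is expected to check the return value.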
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif