1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrDrawOpAtlas.h"
9
10 #include <memory>
11
12 #include "include/core/SkLog.h"
13 #include "include/private/SkTPin.h"
14 #include "src/core/SkOpts.h"
15 #include "src/gpu/GrBackendUtils.h"
16 #include "src/gpu/GrOnFlushResourceProvider.h"
17 #include "src/gpu/GrOpFlushState.h"
18 #include "src/gpu/GrProxyProvider.h"
19 #include "src/gpu/GrResourceProvider.h"
20 #include "src/gpu/GrResourceProviderPriv.h"
21 #include "src/gpu/GrSurfaceProxyPriv.h"
22 #include "src/gpu/GrTexture.h"
23 #include "src/gpu/GrTracing.h"
24
#ifdef DUMP_ATLAS_DATA
// Debug-only switch (intended to be flipped in a debugger) that enables the
// SkDebugf plot-usage dumps sprinkled through compact()/activateNewPage().
static bool gDumpAtlasData = false;
#endif
28
#ifdef SK_DEBUG
void GrDrawOpAtlas::validate(const AtlasLocator& atlasLocator) const {
    // Check that the plot index stored in the PlotLocator agrees with the plot
    // implied by the glyph rectangle's top-left position.
    int plotsPerRow = fTextureWidth / fPlotWidth;
    int plotsPerCol = fTextureHeight / fPlotHeight;

    auto origin = atlasLocator.topLeft();
    int col = origin.x() / fPlotWidth;
    int row = origin.y() / fPlotHeight;
    // Plot indices run from the bottom-right corner of the page (see createPages()).
    SkASSERT(atlasLocator.plotIndex() ==
             (plotsPerCol - row - 1) * plotsPerRow + (plotsPerRow - col - 1));
}
#endif
42
43 // When proxy allocation is deferred until flush time the proxies acting as atlases require
44 // special handling. This is because the usage that can be determined from the ops themselves
45 // isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
46 // atlases. Extending the usage interval of any op that uses an atlas to the start of the
47 // flush (as is done for proxies that are used for sw-generated masks) also won't work because
48 // the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
49 // must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
50 // (which calls this method).
instantiate(GrOnFlushResourceProvider* onFlushResourceProvider)51 void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
52 for (uint32_t i = 0; i < fNumActivePages; ++i) {
53 // All the atlas pages are now instantiated at flush time in the activeNewPage method.
54 SkASSERT(fViews[i].proxy() && fViews[i].proxy()->isInstantiated());
55 }
56 }
57
Make(GrProxyProvider* proxyProvider, const GrBackendFormat& format, GrColorType colorType, int width, int height, int plotWidth, int plotHeight, GenerationCounter* generationCounter, AllowMultitexturing allowMultitexturing, int atlasPageNum, EvictionCallback* evictor)58 std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
59 const GrBackendFormat& format,
60 GrColorType colorType, int width,
61 int height, int plotWidth, int plotHeight,
62 GenerationCounter* generationCounter,
63 AllowMultitexturing allowMultitexturing,
64 #ifdef SK_ENABLE_SMALL_PAGE
65 int atlasPageNum,
66 #endif
67 EvictionCallback* evictor) {
68 if (!format.isValid()) {
69 return nullptr;
70 }
71
72 std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType,
73 width, height, plotWidth, plotHeight,
74 #ifdef SK_ENABLE_SMALL_PAGE
75 generationCounter, allowMultitexturing, atlasPageNum));
76 #else
77 generationCounter, allowMultitexturing));
78 #endif
79 if (!atlas->getViews()[0].proxy()) {
80 return nullptr;
81 }
82
83 if (evictor != nullptr) {
84 atlas->fEvictionCallbacks.emplace_back(evictor);
85 }
86 return atlas;
87 }
88
89 ////////////////////////////////////////////////////////////////////////////////
// A Plot is one fixed-size sub-rectangle of an atlas page. It packs sub-images
// via fRectanizer and mirrors their pixels in a CPU-side buffer (fData) until
// uploadToTexture() pushes the dirty region to the page's texture.
// (offX, offY) are the plot's coordinates in units of whole plots.
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, GenerationCounter* generationCounter,
                          int offX, int offY, int width, int height, GrColorType colorType)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenerationCounter(generationCounter)
        , fGenID(fGenerationCounter->next())
        , fPlotLocator(fPageIndex, fPlotIndex, fGenID)
        , fData(nullptr)  // CPU backing store is allocated lazily in addSubImage()
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRectanizer(width, height)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))  // origin in page pixel coords
        , fColorType(colorType)
        , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    // We expect the allocated dimensions to be a multiple of 4 bytes
    SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
    // The padding for faster uploads only works for 1, 2 and 4 byte texels
    SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
    fDirtyRect.setEmpty();
}
119
~Plot()120 GrDrawOpAtlas::Plot::~Plot() {
121 sk_free(fData);
122 }
123
addSubImage( int width, int height, const void* image, AtlasLocator* atlasLocator)124 bool GrDrawOpAtlas::Plot::addSubImage(
125 int width, int height, const void* image, AtlasLocator* atlasLocator) {
126 SkASSERT(width <= fWidth && height <= fHeight);
127
128 SkIPoint16 loc;
129 if (!fRectanizer.addRect(width, height, &loc)) {
130 return false;
131 }
132
133 GrIRect16 rect = GrIRect16::MakeXYWH(loc.fX, loc.fY, width, height);
134
135 if (!fData) {
136 fData = reinterpret_cast<unsigned char*>(
137 sk_calloc_throw(fBytesPerPixel * fWidth * fHeight));
138 }
139 size_t rowBytes = width * fBytesPerPixel;
140 const unsigned char* imagePtr = (const unsigned char*)image;
141 // point ourselves at the right starting spot
142 unsigned char* dataPtr = fData;
143 dataPtr += fBytesPerPixel * fWidth * rect.fTop;
144 dataPtr += fBytesPerPixel * rect.fLeft;
145 // copy into the data buffer, swizzling as we go if this is ARGB data
146 if (4 == fBytesPerPixel && kN32_SkColorType == kBGRA_8888_SkColorType) {
147 for (int i = 0; i < height; ++i) {
148 SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
149 dataPtr += fBytesPerPixel * fWidth;
150 imagePtr += rowBytes;
151 }
152 } else {
153 for (int i = 0; i < height; ++i) {
154 memcpy(dataPtr, imagePtr, rowBytes);
155 dataPtr += fBytesPerPixel * fWidth;
156 imagePtr += rowBytes;
157 }
158 }
159
160 fDirtyRect.join({rect.fLeft, rect.fTop, rect.fRight, rect.fBottom});
161
162 rect.offset(fOffset.fX, fOffset.fY);
163 atlasLocator->updateRect(rect);
164 SkDEBUGCODE(fDirty = true;)
165
166 return true;
167 }
168
uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels, GrTextureProxy* proxy)169 void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
170 GrTextureProxy* proxy) {
171 // We should only be issuing uploads if we are in fact dirty
172 SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
173 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
174 size_t rowBytes = fBytesPerPixel * fWidth;
175 const unsigned char* dataPtr = fData;
176 // Clamp to 4-byte aligned boundaries
177 unsigned int clearBits = 0x3 / fBytesPerPixel;
178 fDirtyRect.fLeft &= ~clearBits;
179 fDirtyRect.fRight += clearBits;
180 fDirtyRect.fRight &= ~clearBits;
181 SkASSERT(fDirtyRect.fRight <= fWidth);
182 // Set up dataPtr
183 dataPtr += rowBytes * fDirtyRect.fTop;
184 dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
185
186 writePixels(proxy,
187 fDirtyRect.makeOffset(fOffset.fX, fOffset.fY),
188 fColorType,
189 dataPtr,
190 rowBytes);
191 fDirtyRect.setEmpty();
192 SkDEBUGCODE(fDirty = false;)
193 }
194
resetRects()195 void GrDrawOpAtlas::Plot::resetRects() {
196 fRectanizer.reset();
197
198 fGenID = fGenerationCounter->next();
199 fPlotLocator = PlotLocator(fPageIndex, fPlotIndex, fGenID);
200 fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
201 fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();
202
203 // zero out the plot
204 if (fData) {
205 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
206 }
207
208 fDirtyRect.setEmpty();
209 SkDEBUGCODE(fDirty = false;)
210 }
211
212 ///////////////////////////////////////////////////////////////////////////////
213
// Constructs the atlas and eagerly creates (but does not instantiate) the
// backing proxies for every potential page via createPages().
GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
                             GrColorType colorType, int width, int height,
                             int plotWidth, int plotHeight, GenerationCounter* generationCounter,
#ifdef SK_ENABLE_SMALL_PAGE
                             AllowMultitexturing allowMultitexturing, int atlasPageNum)
#else
                             AllowMultitexturing allowMultitexturing)
#endif
        : fFormat(format)
        , fColorType(colorType)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fPlotWidth(plotWidth)
        , fPlotHeight(plotHeight)
        , fGenerationCounter(generationCounter)
        , fAtlasGeneration(fGenerationCounter->next())
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
#ifdef SK_ENABLE_SMALL_PAGE
        // Small-page builds take a caller-provided page count, capped at 16.
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? ((atlasPageNum > 16) ? 16 : atlasPageNum) : 1)
#else
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
#endif
        , fNumActivePages(0) {
    int numPlotsX = width/plotWidth;
    int numPlotsY = height/plotHeight;
    SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
    // Plot dimensions must evenly divide the texture dimensions.
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    fNumPlots = numPlotsX * numPlotsY;

    this->createPages(proxyProvider, generationCounter);
    SK_LOGD("Texture[Width:%{public}d, Height:%{public}d, MaxPage:%{public}d], Plot[Width:%{public}d, Height:%{public}d].", \
        fTextureWidth, fTextureHeight, fMaxPages, fPlotWidth, fPlotHeight);
}
250
processEviction(PlotLocator plotLocator)251 inline void GrDrawOpAtlas::processEviction(PlotLocator plotLocator) {
252 for (EvictionCallback* evictor : fEvictionCallbacks) {
253 evictor->evict(plotLocator);
254 }
255
256 fAtlasGeneration = fGenerationCounter->next();
257 }
258
updatePlot(GrDeferredUploadTarget* target, AtlasLocator* atlasLocator, Plot* plot)259 inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target,
260 AtlasLocator* atlasLocator, Plot* plot) {
261 int pageIdx = plot->pageIndex();
262 this->makeMRU(plot, pageIdx);
263
264 // If our most recent upload has already occurred then we have to insert a new
265 // upload. Otherwise, we already have a scheduled upload that hasn't yet ocurred.
266 // This new update will piggy back on that previously scheduled update.
267 if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
268 // With c+14 we could move sk_sp into lamba to only ref once.
269 sk_sp<Plot> plotsp(SkRef(plot));
270
271 GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
272 SkASSERT(proxy && proxy->isInstantiated()); // This is occurring at flush time
273
274 GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
275 [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
276 plotsp->uploadToTexture(writePixels, proxy);
277 });
278 plot->setLastUploadToken(lastUploadToken);
279 }
280 atlasLocator->updatePlotLocator(plot->plotLocator());
281 SkDEBUGCODE(this->validate(*atlasLocator);)
282 return true;
283 }
284
uploadToPage(unsigned int pageIdx, GrDeferredUploadTarget* target, int width, int height, const void* image, AtlasLocator* atlasLocator)285 bool GrDrawOpAtlas::uploadToPage(unsigned int pageIdx, GrDeferredUploadTarget* target, int width,
286 int height, const void* image, AtlasLocator* atlasLocator) {
287 SkASSERT(fViews[pageIdx].proxy() && fViews[pageIdx].proxy()->isInstantiated());
288
289 // look through all allocated plots for one we can share, in Most Recently Refed order
290 PlotList::Iter plotIter;
291 plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);
292
293 for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
294 SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
295 plot->bpp());
296
297 if (plot->addSubImage(width, height, image, atlasLocator)) {
298 return this->updatePlot(target, atlasLocator, plot);
299 }
300 }
301
302 return false;
303 }
304
// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kPlotRecentlyUsedCount = 32;
// Same idea for the atlas as a whole: after this many flushes with no atlas use
// at all, compact() is allowed to run even though the atlas was idle.
static constexpr auto kAtlasRecentlyUsedCount = 128;
313
// Adds a width x height sub-image to the atlas, trying in order:
//   1) any existing page with free plot space;
//   2) evicting an already-flushed LRU plot (only once all pages are active);
//   3) activating a new page;
//   4) displacing an LRU plot not referenced by the draw being prepared, with
//      an inline upload.
// Returns kTryAgain when every candidate plot is referenced by the in-flight
// draw -- the caller must enqueue the draw and retry.
GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
                                                   GrDeferredUploadTarget* target,
                                                   int width, int height, const void* image,
                                                   AtlasLocator* atlasLocator) {
    // An image larger than a single plot can never fit.
    if (width > fPlotWidth || height > fPlotHeight) {
        return ErrorCode::kError;
    }

    // Look through each page to see if we can upload without having to flush
    // We prioritize this upload to the first pages, not the most recently used, to make it easier
    // to remove unused pages in reverse page order.
    for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
        if (this->uploadToPage(pageIdx, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        }
    }

    // If the above fails, then see if the least recently used plot per page has already been
    // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
    // We wait until we've grown to the full number of pages to begin evicting already flushed
    // plots so that we can maximize the opportunity for reuse.
    // As before we prioritize this upload to the first pages, not the most recently used.
    if (fNumActivePages == this->maxPages()) {
        for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
            Plot* plot = fPages[pageIdx].fPlotList.tail();
            SkASSERT(plot);
            if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
                // This plot's last use has been flushed, so it is safe to recycle.
                this->processEvictionAndResetRects(plot);
                SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                         plot->bpp());
                // A freshly reset plot must have room for any image that passed
                // the size check above.
                SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, atlasLocator);
                SkASSERT(verify);
                if (!this->updatePlot(target, atlasLocator, plot)) {
                    return ErrorCode::kError;
                }
                return ErrorCode::kSucceeded;
            }
        }
    } else {
        // If we haven't activated all the available pages, try to create a new one and add to it
        if (!this->activateNewPage(resourceProvider)) {
            return ErrorCode::kError;
        }

        if (this->uploadToPage(fNumActivePages-1, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        } else {
            // If we fail to upload to a newly activated page then something has gone terribly
            // wrong - return an error
            return ErrorCode::kError;
        }
    }

    if (!fNumActivePages) {
        return ErrorCode::kError;
    }

    // Try to find a plot that we can perform an inline upload to.
    // We prioritize this upload in reverse order of pages to counterbalance the order above.
    Plot* plot = nullptr;
    for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
        Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
        if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
            plot = currentPlot;
            break;
        }
    }

    // If we can't find a plot that is not used in a draw currently being prepared by an op, then
    // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
    // function. When that draw is enqueued, the draw token advances, and the subsequent call will
    // continue past this branch and prepare an inline upload that will occur after the enqueued
    // draw which references the plot's pre-upload content.
    if (!plot) {
        return ErrorCode::kTryAgain;
    }

    // Displace the LRU plot: evict its contents and replace it in the page's
    // bookkeeping with a clone that will receive the new sub-image.
    this->processEviction(plot->plotLocator());
    int pageIdx = plot->pageIndex();
    fPages[pageIdx].fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->plotIndex()];
    newPlot.reset(plot->clone());

    fPages[pageIdx].fPlotList.addToHead(newPlot.get());
    SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
             newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, atlasLocator);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded ASAP.
    // With c++14 we could move sk_sp into lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));

    GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
    SkASSERT(proxy && proxy->isInstantiated());

    GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
            [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, proxy);
            });
    newPlot->setLastUploadToken(lastUploadToken);

    atlasLocator->updatePlotLocator(newPlot->plotLocator());
    SkDEBUGCODE(this->validate(*atlasLocator);)

    return ErrorCode::kSucceeded;
}
422
#ifdef SK_ENABLE_SMALL_PAGE
// Aggressive compaction used when fUseRadicalsCompact is set (see compact()):
// evicts plots that saw no use in the just-completed flush interval and then
// deactivates trailing pages that had no used plots at all.
// NOTE(review): the eviction condition here keys off lastUploadToken, whereas
// compact() below uses lastUseToken for the analogous check -- confirm this
// asymmetry is intentional.
void GrDrawOpAtlas::compactRadicals(GrDeferredUploadToken startTokenForNextFlush) {
    if (fNumActivePages <= 1) {
        return;
    }
    PlotList::Iter plotIter;
    // Bitmask of pages with at least one plot used during the last flush
    // interval (fMaxPages is capped at 16, so 16 bits suffice).
    unsigned short usedAtlasLastFlush = 0;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                // This page was used in the last flush; it must stay active.
                usedAtlasLastFlush |= (1 << pageIndex);
                break;
            } else if (plot->lastUploadToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
    }
    // Deactivate unused pages from the back; page 0 is always kept.
    int lastPageIndex = fNumActivePages - 1;
    while (lastPageIndex > 0 && !(usedAtlasLastFlush & (1 << lastPageIndex))) {
        deactivateLastPage();
        lastPageIndex--;
    }
}
#endif
449
// Called once per flush: ages plot-usage counters, evicts stale plots, migrates
// lightly-used plots off the last page, and deactivates the last page when it
// falls out of use. 'startTokenForNextFlush' delimits the flush interval used
// to decide what counts as "used this flush".
void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
#ifdef SK_ENABLE_SMALL_PAGE
    // Radical mode evicts after a single idle flush (threshold = 1) and runs the
    // aggressive page-trimming pass first.
    int threshold;
    if (this->fUseRadicalsCompact) {
        threshold = 1;
        compactRadicals(startTokenForNextFlush);
    } else {
        threshold = kPlotRecentlyUsedCount;
    }
#else
    int threshold = kPlotRecentlyUsedCount;
#endif
    if (fNumActivePages < 1) {
        fPrevFlushToken = startTokenForNextFlush;
        return;
    }

    // For all plots, reset number of flushes since used if used this frame.
    PlotList::Iter plotIter;
    bool atlasUsedThisFlush = false;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            // Reset number of flushes since used
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->resetFlushesSinceLastUsed();
                atlasUsedThisFlush = true;
            }

            plotIter.next();
        }
    }

    if (atlasUsedThisFlush) {
        fFlushesSinceLastUse = 0;
    } else {
        ++fFlushesSinceLastUse;
    }

    // We only try to compact if the atlas was used in the recently completed flush or
    // hasn't been used in a long time.
    // This is to handle the case where a lot of text or path rendering has occurred but then just
    // a blinking cursor is drawn.
    if (atlasUsedThisFlush || fFlushesSinceLastUse > kAtlasRecentlyUsedCount) {
        SkTArray<Plot*> availablePlots;
        uint32_t lastPageIndex = fNumActivePages - 1;

        // For all plots but the last one, update number of flushes since used, and check to see
        // if there are any in the first pages that the last page can safely upload to.
        for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("page %d: ", pageIndex);
            }
#endif
            plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // Update number of flushes since plot was last used
                // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
                // to avoid deleting everything when we return to text drawing in the blinking
                // cursor case
                if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                    plot->incFlushesSinceLastUsed();
                }

#ifdef DUMP_ATLAS_DATA
                if (gDumpAtlasData) {
                    SkDebugf("%d ", plot->flushesSinceLastUsed());
                }
#endif
                // Count plots we can potentially upload to in all pages except the last one
                // (the potential compactee).
                if (plot->flushesSinceLastUsed() > threshold) {
                    availablePlots.push_back() = plot;
                }

                plotIter.next();
            }
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("\n");
            }
#endif
        }

        // Count recently used plots in the last page and evict any that are no longer in use.
        // Since we prioritize uploading to the first pages, this will eventually
        // clear out usage of this page unless we have a large need.
        plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        unsigned int usedPlots = 0;
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("page %d: ", lastPageIndex);
        }
#endif
        while (Plot* plot = plotIter.get()) {
            // Update number of flushes since plot was last used
            if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->incFlushesSinceLastUsed();
            }

#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("%d ", plot->flushesSinceLastUsed());
            }
#endif
            // If this plot was used recently
            if (plot->flushesSinceLastUsed() <= threshold) {
                usedPlots++;
            } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                // otherwise if aged out just evict it.
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("\n");
        }
#endif

        // If recently used plots in the last page are using less than a quarter of the page, try
        // to evict them if there's available space in earlier pages. Since we prioritize uploading
        // to the first pages, this will eventually clear out usage of this page unless we have a
        // large need.
        if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
            plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // If this plot was used recently
                if (plot->flushesSinceLastUsed() <= threshold) {
                    // See if there's room in an earlier page and if so evict.
                    // We need to be somewhat harsh here so that a handful of plots that are
                    // consistently in use don't end up locking the page in memory.
                    if (availablePlots.count() > 0) {
                        this->processEvictionAndResetRects(plot);
                        this->processEvictionAndResetRects(availablePlots.back());
                        availablePlots.pop_back();
                        --usedPlots;
                    }
                    if (!usedPlots || !availablePlots.count()) {
                        break;
                    }
                }
                plotIter.next();
            }
        }

        // If none of the plots in the last page have been used recently, delete it.
        if (!usedPlots) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("delete %d\n", fNumActivePages-1);
            }
#endif
            this->deactivateLastPage();
            fFlushesSinceLastUse = 0;
        }
    }

    fPrevFlushToken = startTokenForNextFlush;
}
611
createPages( GrProxyProvider* proxyProvider, GenerationCounter* generationCounter)612 bool GrDrawOpAtlas::createPages(
613 GrProxyProvider* proxyProvider, GenerationCounter* generationCounter) {
614 SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));
615
616 SkISize dims = {fTextureWidth, fTextureHeight};
617
618 int numPlotsX = fTextureWidth/fPlotWidth;
619 int numPlotsY = fTextureHeight/fPlotHeight;
620
621 for (uint32_t i = 0; i < this->maxPages(); ++i) {
622 GrSwizzle swizzle = proxyProvider->caps()->getReadSwizzle(fFormat, fColorType);
623 if (GrColorTypeIsAlphaOnly(fColorType)) {
624 swizzle = GrSwizzle::Concat(swizzle, GrSwizzle("aaaa"));
625 }
626 sk_sp<GrSurfaceProxy> proxy = proxyProvider->createProxy(
627 fFormat, dims, GrRenderable::kNo, 1, GrMipmapped::kNo, SkBackingFit::kExact,
628 SkBudgeted::kYes, GrProtected::kNo, GrInternalSurfaceFlags::kNone,
629 GrSurfaceProxy::UseAllocator::kNo);
630 if (!proxy) {
631 return false;
632 }
633 fViews[i] = GrSurfaceProxyView(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);
634
635 // set up allocated plots
636 fPages[i].fPlotArray = std::make_unique<sk_sp<Plot>[]>(numPlotsX * numPlotsY);
637
638 sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
639 for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
640 for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
641 uint32_t plotIndex = r * numPlotsX + c;
642 currPlot->reset(new Plot(
643 i, plotIndex, generationCounter, x, y, fPlotWidth, fPlotHeight, fColorType));
644
645 // build LRU list
646 fPages[i].fPlotList.addToHead(currPlot->get());
647 ++currPlot;
648 }
649 }
650
651 }
652
653 return true;
654 }
655
activateNewPage(GrResourceProvider* resourceProvider)656 bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
657 SkASSERT(fNumActivePages < this->maxPages());
658
659 if (!fViews[fNumActivePages].proxy()->instantiate(resourceProvider)) {
660 return false;
661 }
662
663 #ifdef DUMP_ATLAS_DATA
664 if (gDumpAtlasData) {
665 SkDebugf("activated page#: %d\n", fNumActivePages);
666 }
667 #endif
668
669 ++fNumActivePages;
670 return true;
671 }
672
673
deactivateLastPage()674 inline void GrDrawOpAtlas::deactivateLastPage() {
675 SkASSERT(fNumActivePages);
676
677 uint32_t lastPageIndex = fNumActivePages - 1;
678
679 int numPlotsX = fTextureWidth/fPlotWidth;
680 int numPlotsY = fTextureHeight/fPlotHeight;
681
682 fPages[lastPageIndex].fPlotList.reset();
683 for (int r = 0; r < numPlotsY; ++r) {
684 for (int c = 0; c < numPlotsX; ++c) {
685 uint32_t plotIndex = r * numPlotsX + c;
686
687 Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
688 currPlot->resetRects();
689 currPlot->resetFlushesSinceLastUsed();
690
691 // rebuild the LRU list
692 SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
693 SkDEBUGCODE(currPlot->fList = nullptr);
694 fPages[lastPageIndex].fPlotList.addToHead(currPlot);
695 }
696 }
697
698 // remove ref to the backing texture
699 fViews[lastPageIndex].proxy()->deinstantiate();
700 --fNumActivePages;
701 }
702
GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes)703 GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
704 static const SkISize kARGBDimensions[] = {
705 {256, 256}, // maxBytes < 2^19
706 {512, 256}, // 2^19 <= maxBytes < 2^20
707 {512, 512}, // 2^20 <= maxBytes < 2^21
708 {1024, 512}, // 2^21 <= maxBytes < 2^22
709 {1024, 1024}, // 2^22 <= maxBytes < 2^23
710 {2048, 1024}, // 2^23 <= maxBytes
711 };
712
713 // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
714 maxBytes >>= 18;
715 // Take the floor of the log to get the index
716 int index = maxBytes > 0
717 ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
718 : 0;
719
720 SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
721 SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
722 fARGBDimensions.set(std::min<int>(kARGBDimensions[index].width(), maxTextureSize),
723 std::min<int>(kARGBDimensions[index].height(), maxTextureSize));
724 fMaxTextureSize = std::min<int>(maxTextureSize, kMaxAtlasDim);
725 }
726
#ifdef SK_ENABLE_SMALL_PAGE
// Switches the config to small 512x512 ARGB pages and returns how many such
// pages fit in the byte budget the previous (larger) configuration represented.
int GrDrawOpAtlasConfig::resetAsSmallPage() {
    size_t budgetBytes = fARGBDimensions.width() * fARGBDimensions.height() * 4;
    fARGBDimensions.set(512, 512);
    return budgetBytes / (fARGBDimensions.width() * fARGBDimensions.height());
}
#endif
734
atlasDimensions(GrMaskFormat type) const735 SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
736 if (kA8_GrMaskFormat == type) {
737 // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
738 return { std::min<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
739 std::min<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
740 } else {
741 return fARGBDimensions;
742 }
743 }
744
plotDimensions(GrMaskFormat type) const745 SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
746 if (kA8_GrMaskFormat == type) {
747 SkISize atlasDimensions = this->atlasDimensions(type);
748 // For A8 we want to grow the plots at larger texture sizes to accept more of the
749 // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
750 // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.
751
752 #ifdef SK_ENABLE_SMALL_PAGE
753 // This will give us 515×512 plots for 1024x1024, 256x256 plots otherwise.
754 int plotWidth = atlasDimensions.width() >= 1024 ? 512 : 256;
755 int plotHeight = atlasDimensions.height() >= 1024 ? 512 : 256;
756 #else
757 // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
758 // and 256x256 plots otherwise.
759 int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
760 int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;
761 #endif
762
763 return { plotWidth, plotHeight };
764 } else {
765 // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
766 return { 256, 256 };
767 }
768 }
769