/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrAttachment.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrRingBuffer.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/sksl/SkSLCompiler.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::initCapsAndCompiler(sk_sp<const GrCaps> caps) {
    fCaps = std::move(caps);
    fCompiler = std::make_unique<SkSL::Compiler>(fCaps->shaderCaps());
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}
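
// Illustrative example (not part of the original source): for an 8x8 texture
// with mipLevelCount == 4, the configurations accepted above are:
//   - all four levels provided (8x8, 4x4, 2x2, 1x1),
//   - only the base 8x8 level provided, or
//   - no levels provided at all.
// Anything in between fails validation.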

sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrTextureType textureType,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            SkBudgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask) {
    if (this->caps()->isFormatCompressed(format)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipmapped mipMapped = mipLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions,
                                             format,
                                             renderable,
                                             renderTargetSampleCnt,
                                             mipMapped,
                                             textureType)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      GrMipmapped mipMapped,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected) {
    int mipLevelCount = 1;
    if (mipMapped == GrMipmapped::kYes) {
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
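    // Illustrative note (not in the original source): 32 - SkCLZ(n) is
    // floor(log2(n)) + 1. For a 100x50 texture the larger dimension, 100, has
    // 7 significant bits, so mipLevelCount == 7:
    // 100x50, 50x25, 25x12, 12x6, 6x3, 3x1, 1x1.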
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         mipLevelCount,
                                         levelClearMask);
    if (tex && mipMapped == GrMipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }
    return tex;
}
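
// Illustrative usage (a sketch; values assumed, not taken from this file):
//   sk_sp<GrTexture> tex = gpu->createTexture({256, 256}, format,
//                                             GrTextureType::k2D,
//                                             GrRenderable::kNo,
//                                             /*renderTargetSampleCnt=*/1,
//                                             GrMipmapped::kYes,
//                                             SkBudgeted::kYes,
//                                             GrProtected::kNo);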

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }
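    // Illustrative example (not in the original source): with mipLevelCount == 3
    // and pixel data missing only for level 1, levelClearMask == 0b010, so just
    // that level is cleared by the backend.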

    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         texelLevelCount,
                                         levelClearMask);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(),
                                   SkIRect::MakeSize(dimensions),
                                   textureColorType,
                                   srcColorType,
                                   texels,
                                   texelLevelCount)) {
                return nullptr;
            }
            // Currently if level[1] of the mip map has pixel data then so must all other levels,
            // as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                SkBudgeted budgeted,
                                                GrMipmapped mipMapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                        mipMapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           data, dataSize);
}
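
// Illustrative example (not part of the original source): for
// SkImage::CompressionType::kETC2_RGB8_UNORM, an 8x8 non-mipmapped texture
// occupies four 4x4 blocks at 8 bytes each, so SkCompressedDataSize() returns
// 32 and any smaller dataSize is rejected above.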

sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                SkBudgeted budgeted,
                                                GrMipmapped mipMapped,
                                                GrProtected isProtected,
                                                OH_NativeBuffer* nativeBuffer,
                                                size_t bufferSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    if (!nativeBuffer) {
        return nullptr;
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (bufferSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                          mipMapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           nativeBuffer, bufferSize);
}

sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    if (rt && backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan, so we default to returning nullptr here.
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}
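
// Illustrative usage (a sketch; values assumed, not taken from this file):
//   sk_sp<GrGpuBuffer> vbo = gpu->createBuffer(4096, GrGpuBufferType::kVertex,
//                                              kStatic_GrAccessPattern,
//                                              /*data=*/nullptr);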

bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint);
}

bool GrGpu::readPixels(GrSurface* surface,
                       SkIRect rect,
                       GrColorType surfaceColorType,
                       GrColorType dstColorType,
                       void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * rect.width());
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }
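    // Illustrative example (not in the original source): for
    // GrColorType::kRGBA_8888 (4 bytes per pixel) and a 100px-wide rect,
    // minRowBytes == 400. With readPixelsRowBytesSupport(), any multiple of 4
    // that is >= 400 is accepted; otherwise rowBytes must be exactly 400.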

    this->handleDirtyContext();

    return this->onReadPixels(surface, rect, surfaceColorType, dstColorType, buffer, rowBytes);
}

bool GrGpu::writePixels(GrSurface* surface,
                        SkIRect rect,
                        GrColorType surfaceColorType,
                        GrColorType srcColorType,
                        const GrMipLevel texels[],
                        int mipLevelCount,
                        bool prepForTexSampling) {
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    int64_t startTimestamp = 0;
    int64_t endTimestamp = 0;
    bool isTagEnabled = IsTagEnabled(HITRACE_TAG_GRAPHIC_AGP);
    if (isTagEnabled) {
        startTimestamp = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
    }
#endif
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
                                    surface->uniqueID().asUInt(), rect.width(), rect.height());
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface.
        if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
            return false;
        }
    } else if (rect != SkIRect::MakeSize(surface->dimensions())) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    if (!validate_texel_levels(rect.size(), srcColorType, texels, mipLevelCount, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface,
                            rect,
                            surfaceColorType,
                            srcColorType,
                            texels,
                            mipLevelCount,
                            prepForTexSampling)) {
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
        if (isTagEnabled) {
            endTimestamp = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now().time_since_epoch()).count();
            int duration = static_cast<int>(endTimestamp - startTimestamp);
            if (duration > TEXT_UPLOAD_TIME) {
                HITRACE_OHOS_NAME_FMT_ALWAYS("TEXT_UPLOAD_TIME = %d µs, Texture upload(%u) %ix%i",
                                             duration, surface->uniqueID().asUInt(),
                                             rect.width(), rect.height());
            }
        }
#endif
        return true;
    }
    return false;
}
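
// Illustrative usage (a sketch; the GrMipLevel field layout is assumed here,
// not taken from this file):
//   GrMipLevel level{pixels, rowBytes, /*fOptionalStorage=*/nullptr};
//   gpu->writePixels(surface, SkIRect::MakeWH(w, h),
//                    GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
//                    &level, /*mipLevelCount=*/1);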

bool GrGpu::transferPixelsTo(GrTexture* texture,
                             SkIRect rect,
                             GrColorType textureColorType,
                             GrColorType bufferColorType,
                             sk_sp<GrGpuBuffer> transferBuffer,
                             size_t offset,
                             size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    if (!SkIRect::MakeSize(texture->dimensions()).contains(rect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * rect.width())) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * rect.width())) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture,
                                 rect,
                                 textureColorType,
                                 bufferColorType,
                                 std::move(transferBuffer),
                                 offset,
                                 rowBytes)) {
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

bool GrGpu::transferPixelsFrom(GrSurface* surface,
                               SkIRect rect,
                               GrColorType surfaceColorType,
                               GrColorType bufferColorType,
                               sk_sp<GrGpuBuffer> transferBuffer,
                               size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface,
                                   rect,
                                   surfaceColorType,
                                   bufferColorType,
                                   std::move(transferBuffer),
                                   offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == GrMipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on the
        // actual target. This may be caused by things that the drawingManager could not predict,
        // i.e., ops that don't draw anything, aborting a draw for exceptional circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture) {
            if (mipLevels == 1) {
                texture->markMipmapsDirty();
            } else {
                texture->markMipmapsClean();
            }
        }
    }
}

void GrGpu::executeFlushInfo(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurface::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const GrBackendSurfaceMutableState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->semaphoreSupport() && info.fNumSemaphores) {
        for (size_t i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrSemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (info.fFinishedProc) {
        this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The only
    // time we have multiple proxies is if we are flushing a yuv SkImage which won't have state
    // updates anyways.
    SkASSERT(!newState || proxies.size() == 1);
    SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
}

GrOpsRenderPass* GrGpu::getOpsRenderPass(
        GrRenderTarget* renderTarget,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
    fCurrentSubmitRenderPassCount++;
#endif
    fStats.incRenderPasses();
    return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
                                    colorInfo, stencilInfo, sampledProxies, renderPassXferBarriers);
}

bool GrGpu::submitToGpu(bool syncCpu) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(syncCpu);

    this->callSubmittedProcs(submitted);

    this->reportSubmitHistograms();

    return submitted;
}
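
// Note (editorial, not in the original source): callSubmittedProcs() runs
// whether or not onSubmitToGpu() succeeded, forwarding the result to each
// pending proc; ~GrGpu() makes the same call with `false` so procs still
// pending at destruction are not silently dropped.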

void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that has a number of buckets that is
    // less than the number of actual values.
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

    this->onReportSubmitHistograms();
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.count(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.reset();
}

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused: %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // Enable this block to output CSV-style stats for program pre-compilation.
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_passes"));
    values->push_back(fRenderPasses);
    keys->push_back(SkString("reordered_dags_over_budget"));
    values->push_back(fNumReorderedDAGsOverBudget);
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS

bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
                                    SkImage::CompressionType compressionType,
                                    GrMipmapped mipMapped,
                                    const void* data,
                                    size_t length) {
    size_t computedSize = SkCompressedDataSize(compressionType,
                                               dimensions,
                                               nullptr,
                                               mipMapped == GrMipmapped::kYes);
    return computedSize == length;
}
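
// Illustrative example (not in the original source, sizes assume 8-byte 4x4
// blocks for SkImage::CompressionType::kETC2_RGB8_UNORM): an 8x8 texture with
// mips has level sizes 32 (8x8), 8 (4x4), 8 (2x2), and 8 (1x1) bytes, so only
// length == 56 passes the check above.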

GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, isProtected);
}

bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
                                sk_sp<GrRefCntedCallback> finishedCallback,
                                std::array<float, 4> color) {
    if (!backendTexture.isValid()) {
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrMipmapped mipMapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipMapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<GrRefCntedCallback> finishedCallback,
                                           const void* data,
                                           size_t length) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the updateBackendTexture API
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(),
                                 compressionType,
                                 mipMapped,
                                 data,
                                 length)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture,
                                                  std::move(finishedCallback),
                                                  data,
                                                  length);
}