/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/gl/GrGLBuffer.h"

#include "include/core/SkTraceMemoryDump.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/gl/GrGLCaps.h"
#include "src/gpu/gl/GrGLGpu.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

#define GL_ALLOC_CALL(call)                                                 \
    [&] {                                                                   \
        if (this->glGpu()->glCaps().skipErrorChecks()) {                    \
            GR_GL_CALL(this->glGpu()->glInterface(), call);                 \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);                   \
        } else {                                                            \
            this->glGpu()->clearErrorsAndCheckForOOM();                     \
            GR_GL_CALL_NOERRCHECK(this->glGpu()->glInterface(), call);      \
            return this->glGpu()->getErrorAndCheckForOOM();                 \
        }                                                                   \
    }()

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                                   GrAccessPattern accessPattern, const void* data) {
    if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
        (GrGpuBufferType::kXferCpuToGpu == intendedType ||
         GrGpuBufferType::kXferGpuToCpu == intendedType)) {
        return nullptr;
    }

    sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
    if (0 == buffer->bufferID()) {
        return nullptr;
    }
    return buffer;
}

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW

inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
                                               GrAccessPattern accessPattern,
                                               const GrGLCaps& caps) {
    auto drawUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
                return DYNAMIC_DRAW_PARAM;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_DRAW;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_DRAW;
        }
        SkUNREACHABLE;
    };

    auto readUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                return GR_GL_DYNAMIC_READ;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_READ;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_READ;
        }
        SkUNREACHABLE;
    };

    auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
                                                     GrAccessPattern pattern) {
        // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
        if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
            return drawUsage(pattern);
        }
        switch (type) {
            case GrGpuBufferType::kVertex:
            case GrGpuBufferType::kIndex:
            case GrGpuBufferType::kDrawIndirect:
            case GrGpuBufferType::kXferCpuToGpu:
            case GrGpuBufferType::kUniform:
                return drawUsage(pattern);
            case GrGpuBufferType::kXferGpuToCpu:
                return readUsage(pattern);
        }
        SkUNREACHABLE;
    };

    return usageType(bufferType, accessPattern);
}
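
// For illustration only (derived from the helpers above, assuming the driver reports real
// transfer-buffer support rather than TransferBufferType::kNV_PBO):
//     (kVertex,       kStatic_GrAccessPattern)  -> GR_GL_STATIC_DRAW
//     (kVertex,       kDynamic_GrAccessPattern) -> GR_GL_STREAM_DRAW (DYNAMIC_DRAW_PARAM)
//     (kXferGpuToCpu, kStream_GrAccessPattern)  -> GR_GL_STREAM_READ
// With only GL_NV_pixel_buffer_object available, every buffer type falls back to the
// *_DRAW usage hints.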

GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern, const void* data)
        : INHERITED(gpu, size, intendedType, accessPattern)
        , fIntendedType(intendedType)
        , fBufferID(0)
        , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
        , fGLSizeInBytes(0)
        , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        GrGLenum error = GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)size, data, fUsage));
        if (error != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        } else {
            fGLSizeInBytes = size;
        }
    }
    VALIDATE();
    this->registerWithCache(SkBudgeted::kYes);
    if (!fBufferID) {
        this->resourcePriv().removeScratchKey();
    }
}

inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}

inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}

void GrGLBuffer::onRelease() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    if (!this->wasDestroyed()) {
        VALIDATE();
        // make sure we've not been abandoned or already released
        if (fBufferID) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
            fGLSizeInBytes = 0;
        }
        fMapPtr = nullptr;
        VALIDATE();
    }

    INHERITED::onRelease();
}

void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GrGLenum error =
                            GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                    if (error != GR_GL_NO_ERROR) {
                        return;
                    }
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    fGLSizeInBytes = this->size();
    VALIDATE();
}
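
// Descriptive note on the map paths above: kMapBuffer_MapBufferType maps the whole store with
// MapBuffer (optionally orphaning it first via BufferData(nullptr)); kMapBufferRange_MapBufferType
// uses MapBufferRange with READ or WRITE access, adding MAP_INVALIDATE_BUFFER_BIT unless the
// buffer is a kXferCpuToGpu transfer buffer; kChromium_MapBufferType maps through
// MapBufferSubData. With kNone_MapBufferType, onMap() returns early and fMapPtr stays null.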

void GrGLBuffer::onUnmap() {
    SkASSERT(fBufferID);
    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}

bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    SkASSERT(fBufferID);
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    VALIDATE();
    if (srcSizeInBytes > this->size()) {
        return false;
    }
    SkASSERT(srcSizeInBytes <= this->size());
    // bindbuffer handles dirty context
    GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);

    if (this->glCaps().useBufferDataNullHint()) {
        if (this->size() == srcSizeInBytes) {
            GrGLenum error =
                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        } else {
            // Before we call glBufferSubData we give the driver a hint using
            // glBufferData with nullptr. This makes the old buffer contents
            // inaccessible to future draws. The GPU may still be processing
            // draws that reference the old contents. With this hint it can
            // assign a different allocation for the new contents to avoid
            // flushing the gpu past draws consuming the old contents.
            // TODO I think we actually want to try calling bufferData here
            GrGLenum error =
                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)this->size(), nullptr, fUsage));
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
            GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
        }
        fGLSizeInBytes = this->size();
    } else {
        // Note that we're cheating on the size here. Currently no methods
        // allow a partial update that preserves contents of non-updated
        // portions of the buffer (map() does a glBufferData(..size, nullptr..))
        GrGLenum error =
                GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
        if (error != GR_GL_NO_ERROR) {
            return false;
        }
        fGLSizeInBytes = srcSizeInBytes;
    }
    VALIDATE();
    return true;
}
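
// Rough caller-side sketch (hypothetical snippet, assuming the base class's public updateData()
// wrapper that forwards to onUpdateData() above; srcSizeInBytes must not exceed the buffer size):
//
//     sk_sp<GrGLBuffer> buf = GrGLBuffer::Make(gpu, kSize, GrGpuBufferType::kVertex,
//                                              kDynamic_GrAccessPattern, /*data=*/nullptr);
//     if (buf && !buf->updateData(cpuData, cpuDataSize)) {
//         // allocation failed or the buffer was destroyed; the upload did not happen
//     }
//
// When useBufferDataNullHint() is true, a full-size update goes straight through BufferData and a
// smaller update first orphans the store with BufferData(nullptr) before BufferSubData; otherwise
// the store is simply re-specified at the smaller size, as implemented above.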

void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                                  const SkString& dumpName) const {
    SkString buffer_id;
    buffer_id.appendU32(this->bufferID());
    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
                                      buffer_id.c_str());
}

#ifdef SK_DEBUG

void GrGLBuffer::validate() const {
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}

#endif