/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/SkTArray.h"
#include "include/private/SkTDArray.h"
#include "include/private/SkTemplates.h"
#include "include/utils/SkRandom.h"
#include "src/gpu/GrMemoryPool.h"
#include "tests/Test.h"

// A is the top of an inheritance tree of classes that overload op new and
// delete to use a GrMemoryPool. The objects have values of different types
// that can be set and checked.
class A {
public:
    A() {}
    virtual void setValues(int v) {
        fChar = static_cast<char>(v & 0xFF);
    }
    virtual bool checkValues(int v) {
        return fChar == static_cast<char>(v & 0xFF);
    }
    virtual ~A() {}

    void* operator new(size_t size) {
        if (!gPool) {
            return ::operator new(size);
        } else {
            return gPool->allocate(size);
        }
    }

    void operator delete(void* p) {
        if (!gPool) {
            ::operator delete(p);
        } else {
            gPool->release(p);
        }
    }

    static A* Create(SkRandom* r);

    static void SetAllocator(size_t preallocSize, size_t minAllocSize) {
        gPool = GrMemoryPool::Make(preallocSize, minAllocSize);
    }

    static void ResetAllocator() { gPool.reset(); }

    static void ValidatePool() {
#ifdef SK_DEBUG
        gPool->validate();
#endif
    }

private:
    static std::unique_ptr<GrMemoryPool> gPool;
    char fChar;
};

std::unique_ptr<GrMemoryPool> A::gPool;

class B : public A {
public:
    B() {}
    void setValues(int v) override {
        fDouble = static_cast<double>(v);
        this->INHERITED::setValues(v);
    }
    bool checkValues(int v) override {
        return fDouble == static_cast<double>(v) &&
               this->INHERITED::checkValues(v);
    }

private:
    double fDouble;

    using INHERITED = A;
};

class C : public A {
public:
    C() {}
    void setValues(int v) override {
        fInt64 = static_cast<int64_t>(v);
        this->INHERITED::setValues(v);
    }
    bool checkValues(int v) override {
        return fInt64 == static_cast<int64_t>(v) &&
               this->INHERITED::checkValues(v);
    }

private:
    int64_t fInt64;

    using INHERITED = A;
};

// D derives from C and owns a dynamically created B
class D : public C {
public:
    D() {
        fB = new B();
    }
    void setValues(int v) override {
        fVoidStar = reinterpret_cast<void*>(static_cast<intptr_t>(v));
        this->INHERITED::setValues(v);
        fB->setValues(v);
    }
    bool checkValues(int v) override {
        return fVoidStar == reinterpret_cast<void*>(static_cast<intptr_t>(v)) &&
               fB->checkValues(v) &&
               this->INHERITED::checkValues(v);
    }
    ~D() override {
        delete fB;
    }

private:
    void* fVoidStar;
    B* fB;

    using INHERITED = C;
};

class E : public A {
public:
    E() {}
    void setValues(int v) override {
        for (size_t i = 0; i < SK_ARRAY_COUNT(fIntArray); ++i) {
            fIntArray[i] = v;
        }
        this->INHERITED::setValues(v);
    }
    bool checkValues(int v) override {
        bool ok = true;
        for (size_t i = 0; ok && i < SK_ARRAY_COUNT(fIntArray); ++i) {
            if (fIntArray[i] != v) {
                ok = false;
            }
        }
        return ok && this->INHERITED::checkValues(v);
    }

private:
    int fIntArray[20];

    using INHERITED = A;
};

A* A::Create(SkRandom* r) {
    switch (r->nextRangeU(0, 4)) {
        case 0:
            return new A;
        case 1:
            return new B;
        case 2:
            return new C;
        case 3:
            return new D;
        case 4:
            return new E;
        default:
            // suppress warning
            return nullptr;
    }
}

struct Rec {
    A* fInstance;
    int fValue;
};

DEF_TEST(GrMemoryPool, reporter) {
    // prealloc and min alloc sizes for the pool
    static const size_t gSizes[][2] = {
        {0, 0},
        {10 * sizeof(A), 20 * sizeof(A)},
        {100 * sizeof(A), 100 * sizeof(A)},
        {500 * sizeof(A), 500 * sizeof(A)},
        {10000 * sizeof(A), 0},
        {1, 100 * sizeof(A)},
    };

    // different fractions of creation vs. deletion
    static const float gCreateFraction[] = {1.f, .95f, 0.75f, .5f};
    // number of creates/destroys per test
    static const int kNumIters = 20000;
    // check that all the values stored in A objects are correct after this
    // number of iterations
    static const int kCheckPeriod = 500;

    SkRandom r;
    for (size_t s = 0; s < SK_ARRAY_COUNT(gSizes); ++s) {
        A::SetAllocator(gSizes[s][0], gSizes[s][1]);
        A::ValidatePool();
        for (size_t c = 0; c < SK_ARRAY_COUNT(gCreateFraction); ++c) {
            SkTDArray<Rec> instanceRecs;
            for (int i = 0; i < kNumIters; ++i) {
                float createOrDestroy = r.nextUScalar1();
                if (createOrDestroy < gCreateFraction[c] ||
                    0 == instanceRecs.count()) {
                    Rec* rec = instanceRecs.append();
                    rec->fInstance = A::Create(&r);
                    rec->fValue = static_cast<int>(r.nextU());
                    rec->fInstance->setValues(rec->fValue);
                } else {
                    int d = r.nextRangeU(0, instanceRecs.count() - 1);
                    Rec& rec = instanceRecs[d];
                    REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                    delete rec.fInstance;
                    instanceRecs.removeShuffle(d);
                }
                if (0 == i % kCheckPeriod) {
                    A::ValidatePool();
                    for (Rec& rec : instanceRecs) {
                        REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                    }
                }
            }
            for (Rec& rec : instanceRecs) {
                REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                delete rec.fInstance;
            }
        }
    }
}

// GrMemoryPool requires that it is empty at the point of destruction. This class
// helps achieve that by releasing all added memory in its destructor.
class AutoPoolReleaser {
public:
    AutoPoolReleaser(GrMemoryPool& pool): fPool(pool) {
    }
    ~AutoPoolReleaser() {
        for (void* ptr: fAllocated) {
            fPool.release(ptr);
        }
    }
    void add(void* ptr) {
        fAllocated.push_back(ptr);
    }

private:
    GrMemoryPool& fPool;
    SkTArray<void*> fAllocated;
};

DEF_TEST(GrMemoryPoolAPI, reporter) {
    constexpr size_t kSmallestMinAllocSize = GrMemoryPool::kMinAllocationSize;

    // Allocates memory until the pool adds a new block (pool->size() changes).
    auto allocateMemory = [](GrMemoryPool& pool, AutoPoolReleaser& r) {
        size_t origPoolSize = pool.size();
        while (pool.size() == origPoolSize) {
            r.add(pool.allocate(31));
        }
    };

    // Effective prealloc space capacity is >= kMinAllocationSize.
    {
        auto pool = GrMemoryPool::Make(0, 0);
        REPORTER_ASSERT(reporter, pool->preallocSize() == kSmallestMinAllocSize);
    }

    // Effective block size capacity is >= kMinAllocationSize.
    {
        auto pool = GrMemoryPool::Make(kSmallestMinAllocSize, kSmallestMinAllocSize / 2);
        AutoPoolReleaser r(*pool);

        allocateMemory(*pool, r);
        REPORTER_ASSERT(reporter, pool->size() == kSmallestMinAllocSize);
    }

    // Pool allocates exactly preallocSize on creation.
    {
        constexpr size_t kPreallocSize = kSmallestMinAllocSize * 5;
        auto pool = GrMemoryPool::Make(kPreallocSize, 0);
        REPORTER_ASSERT(reporter, pool->preallocSize() == kPreallocSize);
    }

    // Pool allocates exactly minAllocSize when it expands.
    {
        constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 7;
        auto pool = GrMemoryPool::Make(0, kMinAllocSize);
        AutoPoolReleaser r(*pool);
        REPORTER_ASSERT(reporter, pool->size() == 0);

        allocateMemory(*pool, r);
        REPORTER_ASSERT(reporter, pool->size() == kMinAllocSize);

        allocateMemory(*pool, r);
        REPORTER_ASSERT(reporter, pool->size() == 2 * kMinAllocSize);
    }

    // When asked to allocate more than minAllocSize, the pool allocates a larger
    // block to accommodate all internal structures.
    {
        constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
        auto pool = GrMemoryPool::Make(kSmallestMinAllocSize, kMinAllocSize);
        AutoPoolReleaser r(*pool);

        REPORTER_ASSERT(reporter, pool->size() == 0);

        constexpr size_t hugeSize = 10 * kMinAllocSize;
        r.add(pool->allocate(hugeSize));
        REPORTER_ASSERT(reporter, pool->size() > hugeSize);

        // The block allocated to accommodate the huge request doesn't include any
        // extra space, so the next allocation request allocates a new block.
        size_t hugeBlockSize = pool->size();
        r.add(pool->allocate(0));
        REPORTER_ASSERT(reporter, pool->size() == hugeBlockSize + kMinAllocSize);
    }
}