/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"

#include <semaphore>
#include <thread>

#include "include/core/SkExecutor.h"
#include "include/core/SkLog.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"

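// Lazily-created single-thread FIFO executor. allocateImageMemory() queues the OH pre-allocation
// tasks (FirstPreAllocMemory/PreAllocMemory) on it so they run off the calling thread.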
static SkExecutor& GetThreadPool() {
    static std::unique_ptr<SkExecutor> executor = SkExecutor::MakeFIFOThreadPool(1, false);
    return *executor;
}

#ifndef SK_USE_VMA
sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
    return nullptr;
}
#else

sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
#define GR_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(FlushMappedMemoryRanges);
    GR_COPY_FUNCTION(InvalidateMappedMemoryRanges);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);
    GR_COPY_FUNCTION(CmdCopyBuffer);
    GR_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(BindBufferMemory2);
    GR_COPY_FUNCTION_KHR(BindImageMemory2);
    GR_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);

    VmaAllocatorCreateInfo info;
    // OH ISSUE: keep VMA's internal lock protection enabled (do not set
    // VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT), since allocations may also be made
    // from the background pre-allocation thread.
    info.flags = 0;
    if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
         extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
        info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
    }

    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the default size here by looking at memory usage of Android apps and
    // runs of DM. It seems to be a good compromise of not wasting unused allocated space and not
    // making too many small allocations. The AMD allocator will start making blocks at 1/8 the
    // max size and builds up block size as needed before capping at the max set here. When
    // cacheFlag is set, the block size comes from the system-configured value instead.
    if (cacheFlag) {
        info.preferredLargeHeapBlockSize = SkGetVmaBlockSizeMB() * 1024 * 1024; // MB -> bytes
    } else {
        info.preferredLargeHeapBlockSize = 4 * 1024 * 1024;
    }
    info.maxBlockCount = maxBlockCount;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;
    info.pRecordSettings = nullptr;
    info.instance = instance;
    info.vulkanApiVersion = physicalDeviceVersion;

    VmaAllocator allocator;
    vmaCreateAllocator(&info, &allocator);

    return sk_sp<GrVkAMDMemoryAllocator>(new GrVkAMDMemoryAllocator(
#ifdef NOT_USE_PRE_ALLOC
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory()));
#else
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory(), cacheFlag));
#endif
}

GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VmaAllocator allocator,
                                               sk_sp<const GrVkInterface> interface,
#ifdef NOT_USE_PRE_ALLOC
                                               bool mustUseCoherentHostVisibleMemory)
#else
                                               bool mustUseCoherentHostVisibleMemory,
                                               bool cacheFlag)
#endif
        : fAllocator(allocator)
        , fInterface(std::move(interface))
#ifdef NOT_USE_PRE_ALLOC
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory) {}
#else
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory)
        , fCacheFlag(cacheFlag) {}
#endif

GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

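// Pre-warms VMA memory for upcoming image allocations on the background thread: creates a
// temporary "fake" image, allocates reserved memory for it, binds the image, then destroys the
// fake image and frees the reserved allocation again. vmaCreateFakeImage() and the *Reserved*
// entry points are OH-specific extensions to the bundled VMA, not upstream VMA API.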
// OH ISSUE: VMA preAlloc
static void FirstPreAllocMemory(VmaAllocator allocator, VmaAllocationCreateInfo info) {
    VkImage fakeImage;
    VmaAllocation reservedAllocation;
    if (allocator == nullptr) {
        return;
    }
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        return;
    }
    {
        HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "vmaAllocateReservedMemoryForImage");
        result = vmaAllocateReservedMemoryForImage(allocator, fakeImage, &info, &reservedAllocation, nullptr);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: AllocateReservedMemory Failed!! VkResult %d", result);
        vmaDestroyFakeImage(allocator, fakeImage);
        return;
    }
    {
        HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}

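// Same pre-warm sequence as FirstPreAllocMemory(), but reuses a reserved allocation that
// allocateImageMemory() obtained via vmaSwapReservedBlock() instead of allocating a new one.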
// OH ISSUE: VMA preAlloc
static void PreAllocMemory(VmaAllocator allocator, VmaAllocation reservedAllocation) {
    VkImage fakeImage;
    if (allocator == nullptr) {
        return;
    }
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        // Release the reserved allocation even on this failure path so it is not leaked.
        vmaFreeReservedMemory(allocator, reservedAllocation);
        return;
    }
    {
        HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}


VkResult GrVkAMDMemoryAllocator::allocateImageMemory(VkImage image, AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kProtected & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        // Bail out now; 'allocation' may be left uninitialized on failure and must not be used.
        return result;
    }
    *backendMemory = (GrVkBackendMemory)allocation;

    // OH ISSUE: VMA preAlloc
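    // If this allocation made VMA carve out a brand-new block (and caching plus the system
    // pre-allocation switch are enabled), try to swap it into the reserved, pre-allocated block.
    // VK_NOT_READY means no reserved block exists yet, so FirstPreAllocMemory() is queued to
    // build one in the background. VK_SUCCESS means the allocation now belongs to the reserved
    // block; PreAllocMemory() is queued (after a configurable delay) to warm and release it, and
    // the caller instead gets a fresh allocation from a regular block. vmaGetNewBlockStats() and
    // vmaSwapReservedBlock() are OH-specific VMA extensions.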
    bool newBlockflag = false;
    vmaGetNewBlockStats(allocation, &newBlockflag);
    if (newBlockflag && fCacheFlag && SkGetPreAllocFlag()) {
        HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "GrVkAMDMemoryAllocator trigger preAlloc");
        vmaClearNewBlockStats(allocation);
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        // After swap, allocation belongs to the vma reserved block.
        VkResult result2 = vmaSwapReservedBlock(fAllocator, image, &info, &allocation, nullptr);
        if (result2 == VK_NOT_READY) {
            GetThreadPool().add([=] {
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "FirstPreAllocMemory");
                FirstPreAllocMemory(fAllocator, info);
            });
            return result;
        }
        if (result2 == VK_SUCCESS) {
            GetThreadPool().add([=] {
                std::this_thread::sleep_for(std::chrono::microseconds(SkGetPreAllocDelay()));
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "PreAllocMemory");
                PreAllocMemory(fAllocator, allocation);
            });
            VmaAllocation newAllocation;
            VkResult result3 = vmaAllocateMemoryForImage(fAllocator, image, &info, &newAllocation, nullptr);
            if (result3 == VK_SUCCESS) {
                *backendMemory = (GrVkBackendMemory)newAllocation;
            }
            return result3;
        }
    }
    return result;
}

VkResult GrVkAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer, BufferUsage usage,
                                                      AllocationPropertyFlags flags,
                                                      GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // When doing cpu writes and gpu reads the general rule of thumb is to use coherent
            // memory. Though this depends on the fact that we are not doing any cpu reads and the
            // cpu writes are sequential. For sparse writes we'd want cpu cached memory, however we
            // don't do these types of writes in Skia.
            //
            // TODO: In the future there may be times where specific types of memory could benefit
            // from being both coherent and cached. Typically these allow the gpu to read cpu
            // writes from the cache without needing to flush the writes throughout the cache. The
            // reverse is not true and GPU writes tend to invalidate the cache regardless. Also
            // these gpu cache read accesses are typically lower bandwidth than non-cached memory.
            // For now Skia doesn't really have a need or want of this type of memory. But if we
            // ever do we could pass in an AllocationPropertyFlag that requests the cached property.
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kTransfersFromCpuToGpu:
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            break;
        case BufferUsage::kTransfersFromGpuToCpu:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

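    // Per GrVkCaps, some devices require host-visible memory to also be host-coherent, so
    // upgrade any host-visible request accordingly.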
    if (fMustUseCoherentHostVisibleMemory &&
        (info.requiredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (GrVkBackendMemory)allocation;
    }

    return result;
}

void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
                                          GrVkAlloc* alloc) const {
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= GrVkAlloc::kNoncoherent_Flag;
    }
    if (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT & memFlags) {
        flags |= GrVkAlloc::kLazilyAllocated_Flag;
    }

    alloc->fMemory = vmaInfo.deviceMemory;
    alloc->fOffset = vmaInfo.offset;
    alloc->fSize = vmaInfo.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memoryHandle;
    alloc->fAllocator = (GrVkMemoryAllocator*)this;
}

VkResult GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle, void** data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaMapMemory(fAllocator, allocation, data);
}

void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

VkResult GrVkAMDMemoryAllocator::flushMemory(const GrVkBackendMemory& memoryHandle,
                                             VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaFlushAllocation(fAllocator, allocation, offset, size);
}

VkResult GrVkAMDMemoryAllocator::invalidateMemory(const GrVkBackendMemory& memoryHandle,
                                                  VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
}

uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes;
}

uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
}

void GrVkAMDMemoryAllocator::dumpVmaStats(SkString *out, const char *sep) const
{
    constexpr int MB = 1024 * 1024;
    if (out == nullptr || sep == nullptr) {
        return;
    }
    bool flag = SkGetMemoryOptimizedFlag();
    out->appendf("vma_flag: %d %s", flag, sep);
    if (!flag) {
        return;
    }
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    uint64_t free = stats.total.unusedBytes;
    uint64_t used = stats.total.usedBytes;
    uint64_t total = free + used;
    auto maxBlockCount = SkGetVmaBlockCountMax();
    out->appendf("vma_free: %llu (%llu MB)%s", free, free / MB, sep);
    out->appendf("vma_used: %llu (%llu MB)%s", used, used / MB, sep);
    out->appendf("vma_total: %llu (%llu MB)%s", total, total / MB, sep);
    out->appendf("vma_cacheBlockSize: %d MB%s", SkGetVmaBlockSizeMB(), sep);
    // Blocks up to the configured maximum are reported as cache blocks; anything beyond that is
    // reported as dedicated blocks.
    out->appendf("vma_cacheBlockCount: %llu / %llu%s",
        stats.total.blockCount <= maxBlockCount ? stats.total.blockCount : maxBlockCount, maxBlockCount, sep);
    out->appendf("vma_dedicatedBlockCount: %llu%s",
        stats.total.blockCount <= maxBlockCount ? 0 : stats.total.blockCount - maxBlockCount, sep);
    out->appendf("vma_allocationCount: %u%s", stats.total.allocationCount, sep);
    out->appendf("vma_unusedRangeCount: %u%s", stats.total.unusedRangeCount, sep);
    out->appendf("vma_allocationSize: %llu / %llu / %llu%s",
        stats.total.allocationSizeMin, stats.total.allocationSizeAvg, stats.total.allocationSizeMax, sep);
    out->appendf("vma_unusedRangeSize: %llu / %llu / %llu%s",
        stats.total.unusedRangeSizeMin, stats.total.unusedRangeSizeAvg, stats.total.unusedRangeSizeMax, sep);
    uint32_t blockSize = 0;
    vmaGetPreAllocBlockSize(fAllocator, &blockSize);
    out->appendf("vma_preAllocBlockSize: %u / 1%s", blockSize, sep);
}

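// Despite its name, this only releases completely empty VMA blocks via vmaFreeEmptyBlock() (an
// OH-specific extension); it does not run VMA's defragmentation pass. It is a no-op unless the
// allocator was created with cacheFlag set and the system defragment switch is on. When the VMA
// debug flag is set, stats are dumped before and after.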
void GrVkAMDMemoryAllocator::vmaDefragment()
{
    if (!fCacheFlag) {
        return;
    }
    bool flag = SkGetVmaDefragmentOn();
    if (!flag) {
        return;
    }
    bool debugFlag = SkGetVmaDebugFlag();
    if (!debugFlag) {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
        return;
    }

    // dfx
    SkString debugInfo;
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() before: %s",
        debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() before: %s", debugInfo.c_str());

    {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
    }

    // dfx
    debugInfo = "";
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() after: %s",
        debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() after: %s", debugInfo.c_str());
}

#endif // SK_USE_VMA