/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "src/gpu/vk/GrVkMemory.h"

#ifdef NOT_BUILD_FOR_OHOS_SDK
#include <parameters.h>
#endif

#include "include/core/SkExecutor.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL((GPU)->vkInterface(), X)

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

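// Scans the physical device's memory types for one that is allowed by typeFilter and supports
// all of the requested property flags. On success the matching index is written to typeIndex
// and true is returned; otherwise typeIndex is left unchanged and false is returned.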
static bool FindMemoryType(GrVkGpu *gpu, uint32_t typeFilter, VkMemoryPropertyFlags properties, uint32_t &typeIndex)
{
    VkPhysicalDevice physicalDevice = gpu->physicalDevice();
    VkPhysicalDeviceMemoryProperties memProperties{};
    VK_CALL(gpu, GetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties));

    bool hasFound = false;
    for (uint32_t i = 0; i < memProperties.memoryTypeCount && !hasFound; ++i) {
        if (typeFilter & (1 << i)) {
            uint32_t supportedFlags = memProperties.memoryTypes[i].propertyFlags & properties;
            if (supportedFlags == properties) {
                typeIndex = i;
                hasFound = true;
            }
        }
    }

    return hasFound;
}

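// Allocates memory for |buffer| through the gpu's GrVkMemoryAllocator and binds it. Buffers
// that the CPU writes to are requested as persistently mapped where the caps allow it. With
// SKIA_DFX_FOR_OHOS enabled, the requested size is additionally recorded on the gpu for
// allocation statistics.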
bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          BufferUsage usage,
#ifdef SKIA_DFX_FOR_OHOS
                                          GrVkAlloc* alloc,
                                          size_t size) {
#else
                                          GrVkAlloc* alloc) {
#endif
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    AllocationPropertyFlags propFlags;
    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    if (usage == BufferUsage::kTransfersFromCpuToGpu ||
        (usage == BufferUsage::kCpuWritesGpuReads && shouldPersistentlyMapCpuToGpu)) {
        // In general it is fine (and often better) to keep buffers that we write to from the
        // CPU persistently mapped.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    VkResult result = allocator->allocateBufferMemory(buffer, usage, propFlags, &memory);
    if (!gpu->checkVkResult(result)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

#ifdef SKIA_DFX_FOR_OHOS
    alloc->fBytes = size;
    gpu->addAllocBufferBytes(size);
#endif

    // Bind buffer
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(), buffer, alloc->fMemory,
                                                 alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, *alloc);
        return false;
    }

    return true;
}

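// Imports the memory backing an OH_NativeBuffer as a dedicated VkDeviceMemory allocation and
// binds it to |buffer|. The allocation bypasses the GrVkMemoryAllocator, so the resulting
// GrVkAlloc is marked as external memory and is later released with vkFreeMemory in
// FreeBufferMemory().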
bool GrVkMemory::ImportAndBindBufferMemory(GrVkGpu* gpu,
                                           OH_NativeBuffer *nativeBuffer,
                                           VkBuffer buffer,
                                           GrVkAlloc* alloc) {
    HITRACE_OHOS_NAME_ALWAYS("ImportAndBindBufferMemory");
    VkDevice device = gpu->device();
    VkMemoryRequirements memReqs{};
    VK_CALL(gpu, GetBufferMemoryRequirements(device, buffer, &memReqs));

    uint32_t typeIndex = 0;
    bool hasFound = FindMemoryType(gpu, memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, typeIndex);
    if (!hasFound) {
        return false;
    }

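    // The allocation chains VkImportNativeBufferInfoOHOS -> VkMemoryDedicatedAllocateInfo ->
    // VkMemoryAllocateInfo, so the imported native buffer memory is dedicated to this buffer.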
    // Import external memory
    VkImportNativeBufferInfoOHOS importInfo{};
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_NATIVE_BUFFER_INFO_OHOS;
    importInfo.pNext = nullptr;
    importInfo.buffer = nativeBuffer;

    VkMemoryDedicatedAllocateInfo dedicatedAllocInfo{};
    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
    dedicatedAllocInfo.pNext = &importInfo;
    dedicatedAllocInfo.image = VK_NULL_HANDLE;
    dedicatedAllocInfo.buffer = buffer;

    VkMemoryAllocateInfo allocateInfo{};
    allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocateInfo.pNext = &dedicatedAllocInfo;
    allocateInfo.allocationSize = memReqs.size;
    allocateInfo.memoryTypeIndex = typeIndex;

    VkResult err;
    VkDeviceMemory memory;
    GR_VK_CALL_RESULT(gpu, err, AllocateMemory(device, &allocateInfo, nullptr, &memory));
    if (err) {
        return false;
    }

    // Bind buffer
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(device, buffer, memory, 0));
    if (err) {
        VK_CALL(gpu, FreeMemory(device, memory, nullptr));
        return false;
    }

    alloc->fMemory = memory;
    alloc->fOffset = 0;
    alloc->fSize = memReqs.size;
    alloc->fFlags = 0;
    alloc->fIsExternalMemory = true;

    return true;
}

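// Releases buffer memory. Externally imported memory is freed directly with vkFreeMemory;
// everything else goes back through the allocator that produced it (alloc.fAllocator when set,
// otherwise the gpu's default allocator).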
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
#ifdef SKIA_DFX_FOR_OHOS
    ((GrVkGpu*)gpu)->removeAllocBufferBytes(alloc.fBytes);
#endif
    if (alloc.fIsExternalMemory) {
        VK_CALL(gpu, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        allocator->freeMemory(alloc.fBackendMemory);
    }
}

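// Allocates memory for |image| and binds it. Protected contexts and memoryless (lazily
// allocated) images are forwarded to the allocator via the corresponding property flags, and
// with SKIA_DFX_FOR_OHOS the requested size is recorded for allocation statistics.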
bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
                                         VkImage image,
                                         GrMemoryless memoryless,
                                         GrVkAlloc* alloc,
                                         int memorySize) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

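    // When the VMA cache flag is enabled and the image exceeds the cached-memory size
    // threshold, the allocation goes through the dedicated cache-image allocator instead of
    // the default one.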
    bool vmaFlag = SkGetVmaCacheFlag();
    bool vmaCacheFlag = vmaFlag && memorySize > SkGetNeedCachedMemroySize();
    if (vmaCacheFlag) {
        allocator = gpu->memoryAllocatorCacheImage();
    }

    VkMemoryRequirements memReqs;
    HITRACE_OHOS_NAME_FMT_ALWAYS("AllocAndBindImageMemory vmaCacheFlag %d memSizeOver %d",
                                 vmaCacheFlag, memorySize > SkGetNeedCachedMemroySize());
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    // If we ever find that our allocator is not aggressive enough in using dedicated image
    // memory we can add a size check here to force the use of dedicated memory. However, for
    // now we let the allocators decide. The allocator can query the GPU for each image to see
    // if the GPU recommends or requires the use of dedicated memory.
    if (vmaCacheFlag) {
        propFlags = AllocationPropertyFlags::kNone;
    } else if (gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (gpu->protectedContext()) {
        propFlags |= AllocationPropertyFlags::kProtected;
    }

    if (memoryless == GrMemoryless::kYes) {
        propFlags |= AllocationPropertyFlags::kLazyAllocation;
    }

    { // OH ISSUE: add trace for vulkan interface
        HITRACE_OHOS_NAME_ALWAYS("allocateImageMemory");
        VkResult result = allocator->allocateImageMemory(image, propFlags, &memory);
        if (!gpu->checkVkResult(result)) {
            return false;
        }
    }

    allocator->getAllocInfo(memory, alloc);
#ifdef SKIA_DFX_FOR_OHOS
    alloc->fBytes = memorySize;
    gpu->addAllocImageBytes(memorySize);
#endif

    { // OH ISSUE: add trace for vulkan interface
        HITRACE_OHOS_NAME_ALWAYS("BindImageMemory");
        // Bind image
        VkResult err;
        GR_VK_CALL_RESULT(gpu, err, BindImageMemory(gpu->device(), image, alloc->fMemory,
                                                    alloc->fOffset));
        if (err) {
            FreeImageMemory(gpu, *alloc);
            return false;
        }
    }

    return true;
}

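// Releases image memory through the allocator that produced it (alloc.fAllocator when set,
// otherwise the gpu's default allocator) and, with SKIA_DFX_FOR_OHOS, updates the image
// allocation statistics.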
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
#ifdef SKIA_DFX_FOR_OHOS
    ((GrVkGpu*)gpu)->removeAllocImageBytes(alloc.fBytes);
#endif
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    allocator->freeMemory(alloc.fBackendMemory);
}

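// Maps a host-visible allocation and returns the CPU pointer, or nullptr if mapping fails.
// The allocation must have been created mappable (kMappable_Flag).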
void* GrVkMemory::MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    void* mapPtr;
    VkResult result = allocator->mapMemory(alloc.fBackendMemory, &mapPtr);
    if (!gpu->checkVkResult(result)) {
        return nullptr;
    }
    return mapPtr;
}

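// Unmaps an allocation previously mapped with MapAlloc().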
void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    allocator->unmapMemory(alloc.fBackendMemory);
}

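// Builds a VkMappedMemoryRange for a non-coherent allocation: the offset is rounded down and
// the size rounded up to the given alignment (typically the device's nonCoherentAtomSize), as
// vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges require.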
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

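// Flushes CPU writes on a mapped allocation. This is only needed (and only performed) for
// non-coherent memory; for coherent allocations it is a no-op.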
void GrVkMemory::FlushMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        VkResult result = allocator->flushMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}

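// Invalidates a mapped allocation so the CPU sees GPU writes. As with FlushMappedAlloc, this
// only applies to non-coherent memory.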
void GrVkMemory::InvalidateMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        VkResult result = allocator->invalidateMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}