/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <vulkan/vulkan.h>

#include "pvr_bo.h"
#include "pvr_private.h"
#include "pvr_types.h"
#include "pvr_winsys.h"
#include "vk_alloc.h"
#include "vk_log.h"

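/* Translate the public PVR_BO_ALLOC_FLAG_* allocation flags into the
 * PVR_WINSYS_BO_FLAG_* equivalents consumed by the winsys buffer_create()
 * callback.
 */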
static uint32_t pvr_bo_alloc_to_winsys_flags(uint64_t flags)
{
   uint32_t ws_flags = 0;

   if (flags & PVR_BO_ALLOC_FLAG_CPU_ACCESS)
      ws_flags |= PVR_WINSYS_BO_FLAG_CPU_ACCESS;

   if (flags & PVR_BO_ALLOC_FLAG_GPU_UNCACHED)
      ws_flags |= PVR_WINSYS_BO_FLAG_GPU_UNCACHED;

   if (flags & PVR_BO_ALLOC_FLAG_PM_FW_PROTECT)
      ws_flags |= PVR_WINSYS_BO_FLAG_PM_FW_PROTECT;

   if (flags & PVR_BO_ALLOC_FLAG_ZERO_ON_ALLOC)
      ws_flags |= PVR_WINSYS_BO_FLAG_ZERO_ON_ALLOC;

   return ws_flags;
}

/**
 * \brief Helper interface to allocate a GPU buffer and map it to both host and
 * device virtual memory. Host mapping is conditional and is controlled by
 * flags.
 *
 * \param[in] device      Logical device pointer.
 * \param[in] heap        Heap to allocate device virtual address from.
 * \param[in] size        Size of buffer to allocate.
 * \param[in] alignment   Required alignment of the allocation. Must be a power
 *                        of two.
 * \param[in] flags       Controls allocation, CPU and GPU mapping behavior
 *                        using PVR_BO_ALLOC_FLAG_*.
 * \param[out] pvr_bo_out On success, the allocated buffer is returned in this
 *                        pointer.
 * \return VK_SUCCESS on success, or an error code otherwise.
 *
 * \sa #pvr_bo_free()
 */
VkResult pvr_bo_alloc(struct pvr_device *device,
                      struct pvr_winsys_heap *heap,
                      uint64_t size,
                      uint64_t alignment,
                      uint64_t flags,
                      struct pvr_bo **const pvr_bo_out)
{
   const uint32_t ws_flags = pvr_bo_alloc_to_winsys_flags(flags);
   struct pvr_bo *pvr_bo;
   pvr_dev_addr_t addr;
   VkResult result;

   pvr_bo = vk_alloc(&device->vk.alloc,
                     sizeof(*pvr_bo),
                     8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pvr_bo)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = device->ws->ops->buffer_create(device->ws,
                                           size,
                                           alignment,
                                           PVR_WINSYS_BO_TYPE_GPU,
                                           ws_flags,
                                           &pvr_bo->bo);
   if (result != VK_SUCCESS)
      goto err_vk_free;

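   /* Optionally create the host (CPU) mapping up front; the winsys records
    * the mapping in pvr_bo->bo->map so pvr_bo_free() can release it later.
    */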
   if (flags & PVR_BO_ALLOC_FLAG_CPU_MAPPED) {
      void *map = device->ws->ops->buffer_map(pvr_bo->bo);
      if (!map) {
         result = VK_ERROR_MEMORY_MAP_FAILED;
         goto err_buffer_destroy;
      }
   }

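   /* Reserve a device-virtual address range from the requested heap, then map
    * the buffer object into that range.
    */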
   pvr_bo->vma = device->ws->ops->heap_alloc(heap, size, alignment);
   if (!pvr_bo->vma) {
      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      goto err_buffer_unmap;
   }

   addr = device->ws->ops->vma_map(pvr_bo->vma, pvr_bo->bo, 0, size);
   if (!addr.addr) {
      result = VK_ERROR_MEMORY_MAP_FAILED;
      goto err_heap_free;
   }

   *pvr_bo_out = pvr_bo;

   return VK_SUCCESS;

err_heap_free:
   device->ws->ops->heap_free(pvr_bo->vma);

err_buffer_unmap:
   if (flags & PVR_BO_ALLOC_FLAG_CPU_MAPPED)
      device->ws->ops->buffer_unmap(pvr_bo->bo);

err_buffer_destroy:
   device->ws->ops->buffer_destroy(pvr_bo->bo);

err_vk_free:
   vk_free(&device->vk.alloc, pvr_bo);

   return result;
}
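
/* Example usage (a minimal sketch, not driver code): allocate a 4 KiB,
 * zero-initialized buffer that stays CPU mapped for its whole lifetime, write
 * to it, then release it. The heap member used here
 * (device->heaps.general_heap) and the data/data_size variables are
 * illustrative assumptions; pass whichever pvr_winsys_heap the caller owns.
 *
 *    struct pvr_bo *bo;
 *    VkResult result;
 *
 *    result = pvr_bo_alloc(device,
 *                          device->heaps.general_heap,
 *                          4096,
 *                          4096,
 *                          PVR_BO_ALLOC_FLAG_CPU_MAPPED |
 *                             PVR_BO_ALLOC_FLAG_ZERO_ON_ALLOC,
 *                          &bo);
 *    if (result != VK_SUCCESS)
 *       return result;
 *
 *    memcpy(bo->bo->map, data, data_size);
 *    ...
 *    pvr_bo_free(device, bo);
 */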

/**
 * \brief Interface to map the buffer into host virtual address space.
 *
 * Buffer should have been created with the #PVR_BO_ALLOC_FLAG_CPU_ACCESS
 * flag. It should also not already be mapped or it should have been unmapped
 * using #pvr_bo_cpu_unmap() before mapping again.
 *
 * \param[in] device Logical device pointer.
 * \param[in] pvr_bo Buffer to map.
 * \return Valid host virtual address on success, or NULL otherwise.
 *
 * \sa #pvr_bo_alloc(), #PVR_BO_ALLOC_FLAG_CPU_MAPPED
 */
void *pvr_bo_cpu_map(struct pvr_device *device, struct pvr_bo *pvr_bo)
{
   assert(!pvr_bo->bo->map);

   return device->ws->ops->buffer_map(pvr_bo->bo);
}

/**
 * \brief Interface to unmap the buffer from host virtual address space.
 *
 * Buffer should have a valid mapping, created either using #pvr_bo_cpu_map()
 * or by passing the #PVR_BO_ALLOC_FLAG_CPU_MAPPED flag to #pvr_bo_alloc() at
 * allocation time.
 *
 * Buffer can be remapped using #pvr_bo_cpu_map().
 *
 * \param[in] device Logical device pointer.
 * \param[in] pvr_bo Buffer to unmap.
 */
void pvr_bo_cpu_unmap(struct pvr_device *device, struct pvr_bo *pvr_bo)
{
   assert(pvr_bo->bo->map);
   device->ws->ops->buffer_unmap(pvr_bo->bo);
}
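
/* Example usage (sketch): map a buffer on demand when it was allocated
 * without PVR_BO_ALLOC_FLAG_CPU_MAPPED, fill it, then drop the mapping. The
 * data/size variables are placeholders for illustration.
 *
 *    void *map = pvr_bo_cpu_map(device, bo);
 *    if (!map)
 *       return vk_error(device, VK_ERROR_MEMORY_MAP_FAILED);
 *
 *    memcpy(map, data, size);
 *    pvr_bo_cpu_unmap(device, bo);
 */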

/**
 * \brief Interface to free the buffer object.
 *
 * \param[in] device Logical device pointer.
 * \param[in] pvr_bo Buffer to free.
 *
 * \sa #pvr_bo_alloc()
 */
void pvr_bo_free(struct pvr_device *device, struct pvr_bo *pvr_bo)
{
   if (!pvr_bo)
      return;

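   /* Tear down in the reverse order of pvr_bo_alloc(): unmap the device
    * virtual mapping, release the address range, drop any CPU mapping, then
    * destroy the buffer object itself.
    */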
   device->ws->ops->vma_unmap(pvr_bo->vma);
   device->ws->ops->heap_free(pvr_bo->vma);

   if (pvr_bo->bo->map)
      device->ws->ops->buffer_unmap(pvr_bo->bo);

   device->ws->ops->buffer_destroy(pvr_bo->bo);

   vk_free(&device->vk.alloc, pvr_bo);
}