/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <unistd.h>
#include <sys/mman.h>

#include "pan_device.h"
#include "pan_mempool.h"

/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new entries
 * when needed.
 *
 * In "owned" mode, a single parent owns the entire pool, and the pool owns
 * all created BOs. All BOs are tracked, and their handles can be gathered
 * with panfrost_pool_get_bo_handles. Freeing occurs at the level of the
 * entire pool. This is useful for streaming uploads, where the batch owns
 * the pool.
 *
 * In "unowned" mode, the pool is freestanding. It does not track created BOs
 * or hold references to them. Instead, the consumer must manage the created
 * BOs. This is more flexible, enabling non-transient CSO state or shader
 * code to be packed with conservative lifetime handling.
 */

static struct panfrost_bo *
panfrost_pool_alloc_backing(struct panfrost_pool *pool, size_t bo_sz)
{
        /* We don't know what the BO will be used for, so let's flag it
         * RW and attach it to both the fragment and vertex/tiler jobs.
         * TODO: if we want fine-grained BO assignment, we should pass
         * flags to this function and keep the read/write and
         * fragment/vertex+tiler pools separate.
         */
        struct panfrost_bo *bo = panfrost_bo_create(pool->base.dev, bo_sz,
                        pool->base.create_flags, pool->base.label);

        if (pool->owned)
                util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
        else
                panfrost_bo_unreference(pool->transient_bo);

        pool->transient_bo = bo;
        pool->transient_offset = 0;

        return bo;
}

void
panfrost_pool_init(struct panfrost_pool *pool, void *memctx,
                   struct panfrost_device *dev,
                   unsigned create_flags, size_t slab_size, const char *label,
                   bool prealloc, bool owned)
{
        memset(pool, 0, sizeof(*pool));
        pan_pool_init(&pool->base, dev, create_flags, slab_size, label);
        pool->owned = owned;

        if (owned)
                util_dynarray_init(&pool->bos, memctx);

        if (prealloc)
                panfrost_pool_alloc_backing(pool, pool->base.slab_size);
}

void
panfrost_pool_cleanup(struct panfrost_pool *pool)
{
        if (!pool->owned) {
                panfrost_bo_unreference(pool->transient_bo);
                return;
        }

        util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
                panfrost_bo_unreference(*bo);

        util_dynarray_fini(&pool->bos);
}

void
panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles)
{
        assert(pool->owned && "pool does not track BOs in unowned mode");

        unsigned idx = 0;
        util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
                assert((*bo)->gem_handle > 0);
                handles[idx++] = (*bo)->gem_handle;

                /* Update the BO access flags so that panfrost_bo_wait() knows
                 * about all pending accesses.
                 * We only keep the READ/WRITE info since this is all the BO
                 * wait logic cares about.
                 * We also preserve existing flags as this batch might not
                 * be the first one to access the BO.
                 */
                (*bo)->gpu_access |= PAN_BO_ACCESS_RW;
        }
}

/* Size of the PROT_NONE guard region placed after an allocation when
 * PAN_DBG_OVERFLOW is enabled, so out-of-bounds writes fault immediately. */
#define PAN_GUARD_SIZE 4096

static struct panfrost_ptr
panfrost_pool_alloc_aligned(struct panfrost_pool *pool, size_t sz, unsigned alignment)
{
        assert(alignment == util_next_power_of_two(alignment));

        /* Find or create a suitable BO */
        struct panfrost_bo *bo = pool->transient_bo;
        unsigned offset = ALIGN_POT(pool->transient_offset, alignment);

#ifdef PAN_DBG_OVERFLOW
        if (unlikely(pool->base.dev->debug & PAN_DBG_OVERFLOW) &&
            !(pool->base.create_flags & PAN_BO_INVISIBLE)) {
                unsigned aligned = ALIGN_POT(sz, sysconf(_SC_PAGESIZE));
                unsigned bo_size = aligned + PAN_GUARD_SIZE;

                bo = panfrost_pool_alloc_backing(pool, bo_size);
                memset(bo->ptr.cpu, 0xbb, bo_size);

                /* Place the object as close as possible to the protected
                 * region at the end of the buffer while keeping alignment. */
                offset = ROUND_DOWN_TO(aligned - sz, alignment);

                if (mprotect(bo->ptr.cpu + aligned,
                             PAN_GUARD_SIZE, PROT_NONE) == -1)
                        perror("mprotect");

                pool->transient_bo = NULL;
        }
#endif

        /* If we don't fit, allocate a new backing */
        if (unlikely(bo == NULL || (offset + sz) >= pool->base.slab_size)) {
                bo = panfrost_pool_alloc_backing(pool,
                                ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096));
                offset = 0;
        }

        pool->transient_offset = offset + sz;

        struct panfrost_ptr ret = {
                .cpu = bo->ptr.cpu + offset,
                .gpu = bo->ptr.gpu + offset,
        };

        return ret;
}
PAN_POOL_ALLOCATOR(struct panfrost_pool, panfrost_pool_alloc_aligned)
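
/* Usage sketch for the owned/streaming mode described at the top of this
 * file. This is illustrative only: "dev" and "data" are assumed to come from
 * the surrounding driver context, and callers outside this file allocate
 * through the entry points generated by PAN_POOL_ALLOCATOR rather than
 * calling the static panfrost_pool_alloc_aligned directly.
 *
 *    struct panfrost_pool pool;
 *
 *    // Owned, preallocated pool: the pool keeps a reference on every BO
 *    // it creates, so the batch can free everything in one cleanup call.
 *    panfrost_pool_init(&pool, NULL, dev, 0, 4096, "Upload pool",
 *                       true, true);
 *
 *    // Suballocate 256 bytes at 64-byte alignment. Write through the CPU
 *    // mapping; point GPU job descriptors at the GPU address.
 *    struct panfrost_ptr xfer =
 *            panfrost_pool_alloc_aligned(&pool, 256, 64);
 *    memcpy(xfer.cpu, data, 256);
 *
 *    // Dropping the pool unreferences every tracked BO. In unowned mode
 *    // (owned = false), the consumer would instead hold on to each
 *    // pool->transient_bo and release it when its contents expire.
 *    panfrost_pool_cleanup(&pool);
 */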