/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_

#include <assert.h>
#include <pthread.h>

#include "libdrm_macros.h"
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"
#include "handle_table.h"

#define AMDGPU_CS_MAX_RINGS 8
/* do not use the macros below if y is not a power of two */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
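/*
 * Illustration only: with a power-of-two y, ROUND_UP() and ROUND_DOWN()
 * return x rounded to the next or previous multiple of y, e.g.
 * ROUND_UP(13, 8) == 16 and ROUND_DOWN(13, 8) == 8. Values that are
 * already multiples of y are returned unchanged.
 */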

#define AMDGPU_INVALID_VA_ADDRESS	0xffffffffffffffff
#define AMDGPU_NULL_SUBMIT_SEQ		0

struct amdgpu_bo_va_hole {
	struct list_head list;
	uint64_t offset;
	uint64_t size;
};

struct amdgpu_bo_va_mgr {
	uint64_t va_max;
	struct list_head va_holes;
	pthread_mutex_t bo_va_mutex;
	uint32_t va_alignment;
};

struct amdgpu_va {
	amdgpu_device_handle dev;
	uint64_t address;
	uint64_t size;
	enum amdgpu_gpu_va_range range;
	struct amdgpu_bo_va_mgr *vamgr;
};

struct amdgpu_device {
	atomic_t refcount;
	struct amdgpu_device *next;
	int fd;
	int flink_fd;
	unsigned major_version;
	unsigned minor_version;

	char *marketing_name;
	/** List of buffer handles. Protected by bo_table_mutex. */
	struct handle_table bo_handles;
	/** List of buffer GEM flink names. Protected by bo_table_mutex. */
	struct handle_table bo_flink_names;
	/** This protects all hash tables. */
	pthread_mutex_t bo_table_mutex;
	struct drm_amdgpu_info_device dev_info;
	struct amdgpu_gpu_info info;
	/** The VA manager for the lower virtual address space */
	struct amdgpu_bo_va_mgr vamgr;
	/** The VA manager for the 32-bit address space */
	struct amdgpu_bo_va_mgr vamgr_32;
	/** The VA manager for the high virtual address space */
	struct amdgpu_bo_va_mgr vamgr_high;
	/** The VA manager for the 32-bit high address space */
	struct amdgpu_bo_va_mgr vamgr_high_32;
};

struct amdgpu_bo {
	atomic_t refcount;
	struct amdgpu_device *dev;

	uint64_t alloc_size;

	uint32_t handle;
	uint32_t flink_name;

	pthread_mutex_t cpu_access_mutex;
	void *cpu_ptr;
	int64_t cpu_map_count;
};

struct amdgpu_bo_list {
	struct amdgpu_device *dev;

	uint32_t handle;
};

struct amdgpu_context {
	struct amdgpu_device *dev;
	/** Mutex for accessing fences and for keeping command
	 *  submissions in order. */
	pthread_mutex_t sequence_mutex;
	/* context id */
	uint32_t id;
	uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
	struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
};

/**
 * Structure describing a software semaphore based on the scheduler.
 */
struct amdgpu_semaphore {
	atomic_t refcount;
	struct list_head list;
	struct amdgpu_cs_fence signal_fence;
};

/**
 * Functions.
 */

drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment);

drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);

drm_private void amdgpu_parse_asic_ids(struct amdgpu_device *dev);

drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);

drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);

/**
 * Inline functions.
 */

/**
 * Increment src and decrement dst as if we were updating references
 * for an assignment between two object pointers.
 *
 * \return true if dst reached 0
 */
static inline bool update_references(atomic_t *dst, atomic_t *src)
{
	if (dst != src) {
		/* bump src first */
		if (src) {
			assert(atomic_read(src) > 0);
			atomic_inc(src);
		}
		if (dst) {
			assert(atomic_read(dst) > 0);
			return atomic_dec_and_test(dst);
		}
	}
	return false;
}

#endif
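
/*
 * Illustrative sketch only, not part of this header: a typical
 * reference-counted pointer assignment built on update_references().
 * The bo_reference() and bo_free_internal() names below are
 * hypothetical, used purely for the example; bo_free_internal()
 * stands in for whatever destructor runs once the last reference
 * to the old object is dropped.
 *
 *	static void bo_reference(struct amdgpu_bo **dst,
 *				 struct amdgpu_bo *src)
 *	{
 *		if (update_references(*dst ? &(*dst)->refcount : NULL,
 *				      src ? &src->refcount : NULL))
 *			bo_free_internal(*dst);
 *		*dst = src;
 *	}
 */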