/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Monk.liu@amd.com
 */
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

#include "amdgv_sriovmsg.h"

#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS	(1 << 0) /* vBIOS is SR-IOV ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV	(1 << 1) /* SR-IOV is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF		(1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE		(1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME	(1 << 4) /* is out of full access mode */
#define AMDGPU_VF_MMIO_ACCESS_PROTECT	(1 << 5) /* MMIO write access is not allowed in SR-IOV runtime */

/* all ASICs after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503

enum amdgpu_sriov_vf_mode {
	SRIOV_VF_MODE_BARE_METAL = 0,
	SRIOV_VF_MODE_ONE_VF,
	SRIOV_VF_MODE_MULTI_VF,
};

struct amdgpu_mm_table {
	struct amdgpu_bo *bo;
	uint32_t *cpu_addr;
	uint64_t gpu_addr;
};

#define AMDGPU_VF_ERROR_ENTRY_SIZE 16

/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
struct amdgpu_vf_error_buffer {
	struct mutex lock;
	int read_count;
	int write_count;
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};

enum idh_request;

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*req_init_data)(struct amdgpu_device *adev);
	int (*reset_gpu)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
			  u32 data1, u32 data2, u32 data3);
};
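
/*
 * Illustrative sketch (not part of this header): each SR-IOV capable ASIC
 * backend is expected to fill in an amdgpu_virt_ops table and point
 * adev->virt.ops at it during early init.  The "xgpu_foo_*" names below are
 * placeholders, not real symbols:
 *
 *	static const struct amdgpu_virt_ops xgpu_foo_virt_ops = {
 *		.req_full_gpu	= xgpu_foo_request_full_gpu_access,
 *		.rel_full_gpu	= xgpu_foo_release_full_gpu_access,
 *		.req_init_data	= xgpu_foo_request_init_data,
 *		.reset_gpu	= xgpu_foo_request_reset,
 *		.wait_reset	= xgpu_foo_wait_reset,
 *		.trans_msg	= xgpu_foo_mailbox_trans_msg,
 *	};
 *
 *	adev->virt.ops = &xgpu_foo_virt_ops;
 */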

/*
 * Firmware Reserve Frame buffer
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
	unsigned int checksum_key;
};

/*
 * Legacy GIM header
 *
 * Definitions shared between PF and VF.
 * Structures are forcibly aligned to 4 to keep the same style as the PF side.
 */
#define AMDGIM_DATAEXCHANGE_OFFSET		(64 * 1024)

#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
	(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
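
/*
 * Worked example (illustrative): the macro above returns how many spare
 * uint32_t slots are left once the explicit members are packed into a
 * structure of "total" dwords.  For amdgim_vf2pf_info_v2 further below the
 * arguments are total = 256 dwords (1 KB), 64 u8s (driver_version), no u16s,
 * 12 explicit u32s plus the header expressed in dwords, and no u64s, so:
 *
 *	reserved entries = 256 - (64 + 3) / 4	// 16 dwords for the u8s
 *			       - (0 + 1) / 2	//  0 dwords for the u16s
 *			       - 12		// explicit u32 members
 *			       - header size in dwords
 *
 * which pads the structure out to exactly 256 dwords.
 */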

enum AMDGIM_FEATURE_FLAG {
	/* GIM supports feature of Error log collecting */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
	/* GIM supports feature of loading uCodes */
	AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
	/* MM bandwidth */
	AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
	/* PP ONE VF MODE in GIM */
	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};

struct amdgim_pf2vf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* max_width * max_height */
	unsigned int uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int uvd_enc_max_bandwidth;
	/* max_width * max_height */
	unsigned int vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int vce_enc_max_bandwidth;
	/* MEC FW position in kb from the start of visible frame buffer */
	unsigned int mecfw_kboffset;
	/* the feature flags that the GIM driver supports */
	unsigned int feature_flags;
	/* use private key from mailbox 2 to create checksum */
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* driver version */
	char driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	unsigned int driver_cert;
	/* guest OS type and version: need a define */
	unsigned int os_info;
	/* in the unit of 1M */
	unsigned int fb_usage;
	/* guest gfx engine usage percentage */
	unsigned int gfx_usage;
	/* guest gfx engine health percentage */
	unsigned int gfx_health;
	/* guest compute engine usage percentage */
	unsigned int compute_usage;
	/* guest compute engine health percentage */
	unsigned int compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	unsigned int vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	unsigned int vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	unsigned int uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	unsigned int uvd_enc_health;
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	uint32_t checksum;
	/* driver version */
	uint8_t driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	uint32_t driver_cert;
	/* guest OS type and version: need a define */
	uint32_t os_info;
	/* in the unit of 1M */
	uint32_t fb_usage;
	/* guest gfx engine usage percentage */
	uint32_t gfx_usage;
	/* guest gfx engine health percentage */
	uint32_t gfx_health;
	/* guest compute engine usage percentage */
	uint32_t compute_usage;
	/* guest compute engine health percentage */
	uint32_t compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	uint32_t vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	uint32_t vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	uint32_t uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	uint32_t uvd_enc_health;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0,
			  (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);

struct amdgpu_virt_ras_err_handler_data {
	/* points to the bad page records array */
	struct eeprom_table_record *bps;
	/* points to the reserved bo array */
	struct amdgpu_bo **bps_bo;
	/* the count of entries */
	int count;
	/* last reserved entry's index + 1 */
	int last_reserved;
};

/* GPU virtualization */
struct amdgpu_virt {
	uint32_t caps;
	struct amdgpu_bo *csa_obj;
	void *csa_cpu_addr;
	bool chained_ib_support;
	uint32_t reg_val_offs;
	struct amdgpu_irq_src ack_irq;
	struct amdgpu_irq_src rcv_irq;
	struct work_struct flr_work;
	struct amdgpu_mm_table mm_table;
	const struct amdgpu_virt_ops *ops;
	struct amdgpu_vf_error_buffer vf_errors;
	struct amdgpu_virt_fw_reserve fw_reserve;
	uint32_t gim_feature;
	uint32_t reg_access_mode;
	int req_init_data_ver;
	bool tdr_debug;
	struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
	bool ras_init_done;

	/* vf2pf message */
	struct delayed_work vf2pf_work;
	uint32_t vf2pf_update_interval_ms;
};

#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_sriov_fullaccess(adev) \
(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))

#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

#define amdgpu_sriov_vf_mmio_access_protection(adev) \
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
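
/*
 * Illustrative sketch of how the capability helpers above are typically used
 * (not a real function in this header): plain MMIO register programming is
 * only safe for a VF while it still holds full (exclusive) access; once it
 * drops into SR-IOV runtime mode the access has to take another path, such
 * as the KIQ ring.
 *
 *	if (amdgpu_sriov_fullaccess(adev))
 *		WREG32(reg, val);	// exclusive access: direct MMIO write
 *	else if (amdgpu_sriov_runtime(adev))
 *		...;			// runtime mode: go through the KIQ/mailbox path
 */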

static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

#define amdgpu_sriov_is_pp_one_vf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_is_debug(adev) \
	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);

bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
#endif