/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/s390x/processor.c
    50  void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
    55  TEST_ASSERT((gva % vm->page_size) == 0,
    58  gva, vm->page_size);
    60  (gva >> vm->page_shift)),
    62  gva);
    66  gva, vm->page_size);
    70  gva, vm->max_gfn, vm->page_size);
    75  idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
    82  idx = (gva >> 12) & 0x0ffu;  /* page index */
    89  vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
    ...
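Note: lines 75 and 82 above show how the s390x selftest splits a guest virtual address into four 11-bit table indices (region-first down to segment) and an 8-bit page index below the 12-bit byte offset. A minimal standalone sketch of that split, using only the constants visible in the matched lines (the example address and the program itself are illustrative, not part of the selftest):

#include <stdio.h>
#include <stdint.h>

/* Split a GVA the way the s390x selftest helpers do: four 11-bit indices
 * for the region-1/2/3 and segment tables (ri = 1..4), then an 8-bit page
 * index above the 12-bit byte offset. */
int main(void)
{
        uint64_t gva = 0x0000012345678000ULL;   /* example, page aligned */

        for (int ri = 1; ri <= 4; ri++) {
                uint64_t idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                printf("table level %d index: %llu\n", ri, (unsigned long long)idx);
        }

        uint64_t page_idx = (gva >> 12) & 0x0ffu;       /* page index */
        printf("page index: %llu\n", (unsigned long long)page_idx);
        return 0;
}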
/kernel/linux/linux-5.10/include/trace/events/kvm.h
   285  TP_PROTO(u64 gva, u64 gfn),
   287  TP_ARGS(gva, gfn),
   290  __field(__u64, gva)
   295  __entry->gva = gva;
   299  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
   304  TP_PROTO(u64 gva, u64 gfn),
   306  TP_ARGS(gva, gfn)
   311  TP_PROTO(u64 gva, u6 ...
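Note: these matches are KVM's gva/gfn tracepoints; each records a u64 gva plus the gfn it resolved to and prints them with the format string at line 299. A small userspace sketch of the same record-and-print shape (struct gva_gfn_entry and its fields are illustrative stand-ins, not the kernel's TRACE_EVENT machinery):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the trace record: one gva and the gfn it
 * resolved to, printed with the same format string as the TP_printk above. */
struct gva_gfn_entry {
        uint64_t gva;
        uint64_t gfn;
};

int main(void)
{
        struct gva_gfn_entry entry = { .gva = 0x7f0000001000ULL, .gfn = 0x1234 };

        printf("gva = %#llx, gfn = %#llx\n",
               (unsigned long long)entry.gva, (unsigned long long)entry.gfn);
        return 0;
}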
/kernel/linux/linux-6.6/include/trace/events/kvm.h
   261  TP_PROTO(u64 gva, u64 gfn),
   263  TP_ARGS(gva, gfn),
   266  __field(__u64, gva)
   271  __entry->gva = gva;
   275  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
   280  TP_PROTO(u64 gva, u64 gfn),
   282  TP_ARGS(gva, gfn)
   287  TP_PROTO(u64 gva, u6 ...
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/s390x/processor.c
    54  void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
    60  TEST_ASSERT((gva % vm->page_size) == 0,
    63  gva, vm->page_size);
    65  (gva >> vm->page_shift)),
    67  gva);
    71  gva, vm->page_size);
    75  gva, vm->max_gfn, vm->page_size);
    80  idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
    87  idx = (gva >> 12) & 0x0ffu;  /* page index */
    94  vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
    ...
/kernel/linux/linux-6.6/arch/riscv/kvm/tlb.c
    77  kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, unsigned long asid, unsigned long gva, unsigned long gvsz, unsigned long order)
    94  for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
    99  for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
   119  kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, unsigned long gva, unsigned long gvsz, unsigned long order)
   134  for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
   139  for (pos = gva; pos < (gva ...
   355  kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order, unsigned long asid)
   384  kvm_riscv_hfence_vvma_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order)
    ...
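Note: the matched loops invalidate one guest-virtual page at a time, stepping through [gva, gva + gvsz) in BIT(order)-sized strides. A self-contained sketch of that iteration pattern, with the per-address HFENCE.VVMA instruction replaced by a stub so the example can run anywhere:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Stand-in for the per-address HFENCE.VVMA the kernel issues. */
static void hfence_vvma_stub(unsigned long pos)
{
        printf("fence guest-virtual page at %#lx\n", pos);
}

/* Walk [gva, gva + gvsz) in BIT(order)-sized steps, as in the matched loops. */
static void fence_gva_range(unsigned long gva, unsigned long gvsz, unsigned long order)
{
        unsigned long pos;

        for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                hfence_vvma_stub(pos);
}

int main(void)
{
        fence_gva_range(0x10000, 4 * BIT(12), 12);      /* four 4 KiB pages */
        return 0;
}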
/kernel/linux/linux-6.6/drivers/infiniband/hw/mana/mr.c
    46  req.gva.dma_region_handle = mr_params->gva.dma_region_handle;   (in mana_ib_gd_create_mr())
    47  req.gva.virtual_address = mr_params->gva.virtual_address;
    48  req.gva.access_flags = mr_params->gva.access_flags;
   149  mr_params.gva.dma_region_handle = dma_region_handle;   (in mana_ib_reg_user_mr())
   150  mr_params.gva.virtual_address = iova;
   151  mr_params.gva.access_flags =
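Note: the matches show the GVA-based memory-registration path copying three fields (DMA region handle, virtual address, access flags) from the caller's parameters into the create-MR request. A sketch of that field-for-field copy; the struct shapes and the flag value below are illustrative stand-ins, not the driver's real gdma/mana_ib definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative shapes; the in-tree request structs carry more fields. */
struct gva_params {
        uint64_t dma_region_handle;
        uint64_t virtual_address;
        uint32_t access_flags;
};

struct create_mr_request {
        struct gva_params gva;
};

int main(void)
{
        struct gva_params mr_params = {
                .dma_region_handle = 0xabcd,
                .virtual_address   = 0x7f0000000000ULL,
                .access_flags      = 0x7,       /* placeholder flag bits */
        };
        struct create_mr_request req;

        /* Same copy pattern as in the mana_ib_gd_create_mr() hits above. */
        req.gva.dma_region_handle = mr_params.dma_region_handle;
        req.gva.virtual_address   = mr_params.virtual_address;
        req.gva.access_flags      = mr_params.access_flags;

        printf("registering iova %#llx with handle %#llx\n",
               (unsigned long long)req.gva.virtual_address,
               (unsigned long long)req.gva.dma_region_handle);
        return 0;
}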
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/aarch64/processor.c
    24  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    29  return (gva >> shift) & mask;
    32  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
    40  return (gva >> shift) & mask;
    43  static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    51  return (gva >> shift) & mask;
    54  static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
    57  return (gva >> vm->page_shift) & mask;
   148  vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
   155  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) *
    ...

/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/aarch64/ucall.c
   101  vm_vaddr_t gva;   (in get_ucall())
   105  memcpy(&gva, run->mmio.data, sizeof(gva));
   106  memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
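Note: the processor.c matches compute one page-table index per level by shifting the GVA and masking, and the ucall.c matches recover a GVA that the guest exported through an MMIO write. A sketch of the index extraction for one assumed geometry (4 KiB pages, 48-bit VA, four levels); the selftest itself derives the shift and mask from vm->page_shift and vm->va_bits at run time:

#include <stdio.h>
#include <stdint.h>

/* Assumed geometry: 4 KiB pages, 48-bit VA, 9 index bits per level. */
#define PAGE_SHIFT      12
#define BITS_PER_LEVEL  9
#define LEVEL_MASK      ((1u << BITS_PER_LEVEL) - 1)

static uint64_t table_index(uint64_t gva, int level)
{
        /* level 0 = pgd, 1 = pud, 2 = pmd, 3 = pte */
        unsigned int shift = PAGE_SHIFT + (3 - level) * BITS_PER_LEVEL;

        return (gva >> shift) & LEVEL_MASK;
}

int main(void)
{
        uint64_t gva = 0x0000400012345678ULL;

        for (int level = 0; level < 4; level++)
                printf("level %d index: %llu\n", level,
                       (unsigned long long)table_index(gva, level));
        return 0;
}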
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/include/perf_test_util.h
    44  uint64_t gva;   (struct member)
    69  uint64_t gva;   (in guest_code())
    76  gva = vcpu_args->gva;
    81  uint64_t addr = gva + (i * perf_test_args.guest_page_size);
   187  vcpu_args->gva = guest_test_virt_mem +
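Note: guest_code() walks its slice of test memory by adding a page-sized stride to the per-vCPU base GVA (lines 76 and 81 above). A host-side sketch of the same address arithmetic; the struct below mirrors only the two fields used here, and the page size is an assumption:

#include <stdio.h>
#include <stdint.h>

struct vcpu_args {
        uint64_t gva;           /* per-vCPU base of the test region */
        uint64_t pages;         /* number of guest pages to touch */
};

int main(void)
{
        uint64_t guest_page_size = 4096;        /* assumed; the test negotiates this */
        struct vcpu_args args = { .gva = 0x100000000ULL, .pages = 4 };

        for (uint64_t i = 0; i < args.pages; i++) {
                uint64_t addr = args.gva + (i * guest_page_size);
                printf("would access guest page at %#llx\n", (unsigned long long)addr);
        }
        return 0;
}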
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/aarch64/processor.c
    25  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    30  return (gva >> shift) & mask;
    33  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
    41  return (gva >> shift) & mask;
    44  static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    52  return (gva >> shift) & mask;
    55  static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
    58  return (gva >> vm->page_shift) & mask;
   160  uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
   167  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) *
   198  addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
    ...
/kernel/linux/linux-6.6/arch/x86/kvm/vmx/sgx.c
    24  sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, int size, int alignment, gva_t *gva)
    31  *gva = offset;
    34  *gva += s.base;
    37  if (!IS_ALIGNED(*gva, alignment)) {
    40  fault = is_noncanonical_address(*gva, vcpu);
    42  *gva &= 0xffffffff;
    45  (*gva > s.limit) ||
    47  (((u64)*gva + size - 1) > s.limit + 1));
    73  static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
    79  *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva,
   104  sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
    ...

/kernel/linux/linux-6.6/arch/x86/kvm/vmx/vmx_ops.h
    17  void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
   304  static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
   309  u64 gva;
   310  } operand = { vpid, 0, gva };
   312  vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
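Note: the __invvpid() matches build the 128-bit INVVPID descriptor as { vpid, 0, gva }: a 16-bit VPID in the low quadword and the linear address in the high quadword. A sketch of that layout; the bitfield packing below is for illustration only and shows just the descriptor contents, not the instruction issue:

#include <stdio.h>
#include <stdint.h>

/* INVVPID descriptor, matching the initializer { vpid, 0, gva } above. */
struct invvpid_operand {
        uint64_t vpid : 16;
        uint64_t rsvd : 48;
        uint64_t gva;
};

int main(void)
{
        struct invvpid_operand operand = {
                .vpid = 5, .rsvd = 0, .gva = 0x7f0000001000ULL
        };

        /* The kernel hands this descriptor to INVVPID together with the
         * invalidation type ("ext"); here we only print its contents. */
        printf("vpid=%u gva=%#llx\n", (unsigned int)operand.vpid,
               (unsigned long long)operand.gva);
        return 0;
}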
/kernel/linux/linux-5.10/arch/mips/kvm/mmu.c
  1001  kvm_err("No ptep for gva %lx\n", badvaddr);   (in kvm_mips_handle_kseg0_tlb_fault())
  1014  kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, struct kvm_mips_tlb *tlb, unsigned long gva, bool write_fault)
  1022  unsigned int idx = TLB_LO_IDX(*tlb, gva);
  1032  if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
  1052  ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
  1054  kvm_err("No ptep for gva %lx\n", gva);
  1063  kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
  1157  * @gva: Guest virtual address to be accessed.
  1170  kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, unsigned long gva, bool write)
    ...
/kernel/linux/linux-5.10/arch/x86/kvm/x86.h
   164  vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)
   176  vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
   188  * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
   193  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
   195  if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
   201  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
   204  vcpu->arch.mmio_gva == (gva & PAGE_MASK))
   269  void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u1 ...
    ...
/kernel/linux/linux-6.6/arch/x86/kvm/x86.h
   218  vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)
   230  vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
   242  * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
   247  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
   249  if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
   255  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
   258  vcpu->arch.mmio_gva == (gva & PAGE_MASK))
   317  void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u1 ...
    ...
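Note: these helpers cache and match MMIO translation info at page granularity by comparing gva & PAGE_MASK, and clear the cache either for one specific page or for MMIO_GVA_ANY. A simplified sketch of that match/clear logic; the single cached value below stands in for the vcpu->arch.mmio_* state and skips the generation check the kernel also performs:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_MASK       (~0xfffULL)
#define MMIO_GVA_ANY    (~0ULL)         /* illustrative "match anything" sentinel */

static uint64_t cached_mmio_gva;        /* stands in for vcpu->arch.mmio_gva */

static bool match_mmio_gva(uint64_t gva)
{
        return cached_mmio_gva && cached_mmio_gva == (gva & PAGE_MASK);
}

static void clear_mmio_info(uint64_t gva)
{
        /* Clear only if the cached page matches, unless asked to clear any. */
        if (gva != MMIO_GVA_ANY && cached_mmio_gva != (gva & PAGE_MASK))
                return;
        cached_mmio_gva = 0;
}

int main(void)
{
        cached_mmio_gva = 0x7f0000001000ULL & PAGE_MASK;
        printf("match, same page: %d\n", match_mmio_gva(0x7f0000001abcULL));
        clear_mmio_info(0x7f0000002000ULL);     /* different page: cache kept */
        printf("still cached: %d\n", cached_mmio_gva != 0);
        clear_mmio_info(MMIO_GVA_ANY);
        printf("after clear-any: %d\n", cached_mmio_gva != 0);
        return 0;
}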
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/access_tracking_perf_test.c
    95  static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
    97  uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
   130  uint64_t base_gva = vcpu_args->gva;   (in mark_vcpu_memory_idle())
   149  uint64_t gva = base_gva + page * memstress_args.guest_page_size;
   150  uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
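Note: lookup_pfn() converts the test GVA to a host virtual address with addr_gva2hva() and then reads that address's PFN from the pagemap file. A standalone sketch of the pagemap read for an arbitrary host address; the entry format (PFN in bits 0-54, "present" flag in bit 63) follows Documentation/admin-guide/mm/pagemap.rst, and unprivileged readers may see the PFN field as zero:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

/* Read the pagemap entry for one host virtual address and return its PFN,
 * or 0 if the page is not present or the read fails. */
static uint64_t lookup_pfn(int pagemap_fd, uint64_t hva)
{
        uint64_t entry;
        off_t offset = (hva / sysconf(_SC_PAGESIZE)) * sizeof(entry);

        if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
                return 0;
        if (!(entry & (1ULL << 63)))            /* page present? */
                return 0;
        return entry & ((1ULL << 55) - 1);      /* PFN lives in bits 0-54 */
}

int main(void)
{
        static char page[4096] = { 1 };         /* touched so it is mapped */
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 1;
        printf("pfn: %#llx\n",
               (unsigned long long)lookup_pfn(fd, (uint64_t)(uintptr_t)page));
        close(fd);
        return 0;
}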
/kernel/linux/linux-5.10/arch/s390/kvm/gaccess.c
   493  static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
   534  tec->addr = gva >> PAGE_SHIFT;
   596  * @gva: guest virtual address
   614  static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
   618  union vaddress vaddr = {.addr = gva};
   619  union raddress raddr = {.addr = gva};
   904  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
   912  gva = kvm_s390_logical_to_effective(vcpu, gva);
   913  rc = get_vcpu_asce(vcpu, &asce, gva, a ...
   938  check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long length, enum gacc_mode mode)
    ...

/kernel/linux/linux-5.10/arch/s390/kvm/gaccess.h
   189  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
   191  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/memstress.c
    53  uint64_t gva;   (in memstress_guest_code())
    61  gva = vcpu_args->gva;
    77  addr = gva + (page * args->guest_page_size);
   105  vcpu_args->gva = guest_test_virt_mem +   (in memstress_setup_vcpus())
   111  vcpu_args->gva = guest_test_virt_mem;
/kernel/linux/linux-6.6/arch/riscv/include/asm/kvm_host.h
   265  unsigned long gva,
   271  unsigned long gva, unsigned long gvsz,
   292  unsigned long gva, unsigned long gvsz,
   299  unsigned long gva, unsigned long gvsz,
/kernel/linux/linux-5.10/arch/x86/kvm/vmx/vmx_ops.h
    21  void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
   255  static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
   260  u64 gva;
   261  } operand = { vpid, 0, gva };
   263  vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/riscv/processor.c
    46  static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
    53  return (gva & pte_index_mask[level]) >> pte_index_shift[level];
   113  vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
   121  ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
   128  pte_index(vm, gva, level) * 8;
   134  return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
   137  TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
   138  gva, level);
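Note: addr_arch_gva2gpa() walks the guest page table from vm->pgd, selecting one entry per level with pte_index() and adding the page offset at the leaf. A sketch of the index extraction assuming Sv39 (three 9-bit VPN fields above a 12-bit page offset); the selftest keeps the per-level shifts and masks in tables so it can support other paging modes as well:

#include <stdio.h>
#include <stdint.h>

/* Sv39: 12-bit page offset, then three 9-bit VPN fields (levels 0..2). */
static uint64_t pte_index(uint64_t gva, int level)
{
        return (gva >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
        uint64_t gva = 0x0000003f80042abcULL;

        for (int level = 2; level >= 0; level--)        /* walk from the root down */
                printf("level %d index: %llu\n", level,
                       (unsigned long long)pte_index(gva, level));
        printf("page offset: %#llx\n", (unsigned long long)(gva & 0xfff));
        return 0;
}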
/kernel/linux/linux-6.6/arch/s390/kvm/gaccess.c
   496  static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
   546  tec->addr = gva >> PAGE_SHIFT;
   566  static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
   569  return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
   614  * @gva: guest virtual address
   632  static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
   636  union vaddress vaddr = {.addr = gva};
   637  union raddress raddr = {.addr = gva};
  1276  * @gva: Guest virtual address
  1288  int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u ...
  1312  check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long length, enum gacc_mode mode, u8 access_key)
    ...
/kernel/linux/linux-6.6/arch/mips/kvm/tlb.c
   161  * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
   163  * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
   166  int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
   184  write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
   226  pa = entrylo[!!(gva & pagemaskbit)];
   240  pa |= gva & ~(pagemask | pagemaskbit);
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/x86_64/cpuid_test.c
   129  vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);   (in vcpu_alloc_cpuid())
   130  struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
   134  *p_gva = gva;