/kernel/linux/linux-5.10/arch/powerpc/kvm/ |
H A D | book3s_64_mmu_radix.c |
    137   u64 pte, base, gpa;  in kvmppc_mmu_walk_radix_tree() local
    191   gpa = pte & 0x01fffffffffff000ul;  in kvmppc_mmu_walk_radix_tree()
    192   if (gpa & ((1ul << offset) - 1))  in kvmppc_mmu_walk_radix_tree()
    194   gpa |= eaddr & ((1ul << offset) - 1);  in kvmppc_mmu_walk_radix_tree()
    202   gpte->raddr = gpa;  in kvmppc_mmu_walk_radix_tree()
    394   void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,  in kvmppc_unmap_pte() argument
    401   unsigned long gfn = gpa >> PAGE_SHIFT;  in kvmppc_unmap_pte()
    405   old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);  in kvmppc_unmap_pte()
    406   kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);  in kvmppc_unmap_pte()
    425   gpa ...  in kvmppc_unmap_pte()
    545   kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, unsigned long gpa, unsigned int lpid)  argument
    561   kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, unsigned long gpa, unsigned int lpid)  argument
    586   kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq, unsigned int lpid, unsigned long *rmapp, struct rmap_nested **n_rmap)  argument
    764   kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, unsigned int lpid)  argument
    792   kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp)  argument
    920   unsigned long gpa, gfn;  in kvmppc_book3s_radix_page_fault() local
    1002  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_unmap_radix() local
    1022  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_age_radix() local
    1050  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_test_age_radix() local
    1068  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_radix_test_clear_dirty() local
    1149  unsigned long gpa;  in kvmppc_radix_flush_memslot() local
    1233  unsigned long gpa;  member
    1274  unsigned long gpa;  in debugfs_radix_read() local
    [all ...] |
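The kvmppc_mmu_walk_radix_tree() hits above show the usual way a leaf radix PTE plus an effective address becomes a guest real address: mask out the real page number, reject a misaligned large page, then OR the in-page offset back in. Below is a minimal standalone sketch of that arithmetic; the RPN mask is taken from the listing, but the function, the 2M example, and the way `shift` is passed in are assumptions for illustration, not the kernel code itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified sketch: extract the guest real address from a leaf radix PTE.
     * 'pte' carries the real page number in the masked bits, 'eaddr' is the
     * guest effective address, 'shift' is the leaf page shift (12/21/30).
     * Standalone model only, not kernel code. */
    static uint64_t leaf_pte_to_gpa(uint64_t pte, uint64_t eaddr, unsigned int shift)
    {
        uint64_t gpa = pte & 0x01fffffffffff000ul;      /* real page number bits */

        if (gpa & ((1ul << shift) - 1))
            return (uint64_t)-1;                        /* misaligned large page: reject */

        gpa |= eaddr & ((1ul << shift) - 1);            /* put the in-page offset back */
        return gpa;
    }

    int main(void)
    {
        /* 2M leaf page at guest real 0x40000000, access at offset 0x12345 */
        printf("gpa = 0x%llx\n",
               (unsigned long long)leaf_pte_to_gpa(0x40000000ul | 0x3,
                                                   0xc000000000012345ul, 21));
        return 0;
    }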
H A D | book3s_hv_uvmem.c |
    232   unsigned long gpa;  member
    509   struct kvm *kvm, unsigned long gpa, struct page *fault_page)  in __kvmppc_svm_page_out()
    529   if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))  in __kvmppc_svm_page_out()
    562   gpa, 0, page_shift);  in __kvmppc_svm_page_out()
    582   struct kvm *kvm, unsigned long gpa,  in kvmppc_svm_page_out()
    588   ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,  in kvmppc_svm_page_out()
    638   PAGE_SHIFT, kvm, pvt->gpa, NULL))  in kvmppc_uvmem_drop_pages()
    639   pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",  in kvmppc_uvmem_drop_pages()
    640   pvt->gpa, addr);  in kvmppc_uvmem_drop_pages()
    688   static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct ...  argument
    506   __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page)  argument
    579   kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page)  argument
    734   kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long gpa, struct kvm *kvm, unsigned long page_shift, bool pagein)  argument
    871   kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)  argument
    930   kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift)  argument
    1040  kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift)  argument
    [all ...] |
/kernel/linux/linux-6.6/arch/powerpc/kvm/ |
H A D | book3s_64_mmu_radix.c |
    144   u64 pte, base, gpa;  in kvmppc_mmu_walk_radix_tree() local
    199   gpa = pte & 0x01fffffffffff000ul;  in kvmppc_mmu_walk_radix_tree()
    200   if (gpa & ((1ul << offset) - 1))  in kvmppc_mmu_walk_radix_tree()
    202   gpa |= eaddr & ((1ul << offset) - 1);  in kvmppc_mmu_walk_radix_tree()
    210   gpte->raddr = gpa;  in kvmppc_mmu_walk_radix_tree()
    418   void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,  in kvmppc_unmap_pte() argument
    425   unsigned long gfn = gpa >> PAGE_SHIFT;  in kvmppc_unmap_pte()
    429   old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);  in kvmppc_unmap_pte()
    430   kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);  in kvmppc_unmap_pte()
    449   gpa ...  in kvmppc_unmap_pte()
    569   kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, unsigned long gpa, unsigned int lpid)  argument
    585   kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, unsigned long gpa, unsigned int lpid)  argument
    610   kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq, unsigned int lpid, unsigned long *rmapp, struct rmap_nested **n_rmap)  argument
    788   kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, unsigned int lpid)  argument
    816   kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp)  argument
    944   unsigned long gpa, gfn;  in kvmppc_book3s_radix_page_fault() local
    1031  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_unmap_radix() local
    1050  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_age_radix() local
    1079  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_test_age_radix() local
    1097  unsigned long gpa = gfn << PAGE_SHIFT;  in kvm_radix_test_clear_dirty() local
    1178  unsigned long gpa;  in kvmppc_radix_flush_memslot() local
    1262  unsigned long gpa;  member
    1303  unsigned long gpa;  in debugfs_radix_read() local
    [all ...] |
H A D | book3s_hv_uvmem.c |
    234   unsigned long gpa;  member
    516   struct kvm *kvm, unsigned long gpa, struct page *fault_page)  in __kvmppc_svm_page_out()
    536   if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))  in __kvmppc_svm_page_out()
    569   gpa, 0, page_shift);  in __kvmppc_svm_page_out()
    589   struct kvm *kvm, unsigned long gpa,  in kvmppc_svm_page_out()
    595   ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,  in kvmppc_svm_page_out()
    645   PAGE_SHIFT, kvm, pvt->gpa, NULL))  in kvmppc_uvmem_drop_pages()
    646   pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",  in kvmppc_uvmem_drop_pages()
    647   pvt->gpa, addr);  in kvmppc_uvmem_drop_pages()
    695   static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct ...  argument
    513   __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page)  argument
    586   kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page)  argument
    740   kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long gpa, struct kvm *kvm, unsigned long page_shift, bool pagein)  argument
    877   kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)  argument
    936   kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift)  argument
    1046  kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift)  argument
    [all ...] |
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/ |
H A D | max_guest_memory_test.c |
    23    uint64_t gpa;  in guest_code() local
    25    for (gpa = start_gpa; gpa < end_gpa; gpa += stride)  in guest_code()
    26    *((volatile uint64_t *)gpa) = gpa;  in guest_code()
    96    uint64_t gpa, nr_bytes;  in spawn_workers() local
    104   TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");  in spawn_workers()
    110   for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa ...  in spawn_workers()
    170   uint64_t max_gpa, gpa, slot_size, max_mem, i;  in main() local
    [all ...] |
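The guest_code() hits show the self-tagging trick this test relies on: every page in the range is written with its own guest physical address, so a later pass can detect aliased or mis-mapped pages. The sketch below models the same write-then-verify pattern in ordinary user space; the malloc'd buffer and the index-as-address convention are stand-ins, not the selftest itself.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Model of "write your own address, then verify" from guest_code().
     * A heap buffer stands in for guest memory; the element index models
     * the gpa. Purely illustrative. */
    int main(void)
    {
        const uint64_t stride = 4096 / sizeof(uint64_t);    /* one "page" apart */
        const uint64_t nr_slots = 64 * stride;
        uint64_t *mem = calloc(nr_slots, sizeof(*mem));
        if (!mem)
            return 1;

        for (uint64_t i = 0; i < nr_slots; i += stride)
            mem[i] = i;                       /* tag each "page" with its own address */

        for (uint64_t i = 0; i < nr_slots; i += stride) {
            if (mem[i] != i) {
                fprintf(stderr, "mismatch at %llu\n", (unsigned long long)i);
                return 1;
            }
        }
        puts("all pages tagged correctly");
        free(mem);
        return 0;
    }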
H A D | memslot_perf_test.c |
    185   static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)  in vm_gpa2hva() argument
    192   TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");  in vm_gpa2hva()
    193   TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,  in vm_gpa2hva()
    194   "Too high gpa to translate");  in vm_gpa2hva()
    195   gpa -= MEM_GPA;  in vm_gpa2hva()
    197   gpage = gpa / guest_page_size;  in vm_gpa2hva()
    198   pgoffs = gpa % guest_page_size;  in vm_gpa2hva()
    211   "Asking for remaining pages in slot but gpa not page aligned");  in vm_gpa2hva()
    331   uint64_t gpa;  in prepare_vm() local
    632   uint64_t gpa, ctr;  in test_memslot_do_unmap() local
    657   uint64_t gpa;  in test_memslot_map_unmap_check() local
    [all ...] |
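vm_gpa2hva() above is a plain linear translation: range-check the gpa against the test region, subtract the region base, then split the remainder into a guest page index and an in-page offset. A simplified standalone version follows; MEM_GPA, the page size, and the host_pages array are assumed stand-ins for the selftest's vm_data.

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MEM_GPA          0x10000000ull   /* assumed region base for the sketch */
    #define GUEST_PAGE_SIZE  4096ull

    /* Sketch of a gpa -> host-address lookup over one contiguous test region.
     * 'host_pages[i]' plays the role of the host mapping of guest page i. */
    static void *gpa2hva(void **host_pages, uint64_t npages, uint64_t gpa)
    {
        assert(gpa >= MEM_GPA);                              /* too low to translate */
        assert(gpa < MEM_GPA + npages * GUEST_PAGE_SIZE);    /* too high to translate */

        gpa -= MEM_GPA;
        uint64_t gpage  = gpa / GUEST_PAGE_SIZE;             /* guest page index */
        uint64_t pgoffs = gpa % GUEST_PAGE_SIZE;             /* offset inside the page */

        return (char *)host_pages[gpage] + pgoffs;
    }

    int main(void)
    {
        static char backing[8][GUEST_PAGE_SIZE];
        void *pages[8];
        for (int i = 0; i < 8; i++)
            pages[i] = backing[i];

        /* a gpa inside guest page 3 must resolve into backing[3] */
        return gpa2hva(pages, 8, MEM_GPA + 3 * GUEST_PAGE_SIZE + 100) ==
               &backing[3][100] ? 0 : 1;
    }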
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/aarch64/ |
H A D | ucall.c |
    12    static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)  in ucall_mmio_init() argument
    14    if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))  in ucall_mmio_init()
    17    virt_pg_map(vm, gpa, gpa, 0);  in ucall_mmio_init()
    19    ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;  in ucall_mmio_init()
    27    vm_paddr_t gpa, start, end, step, offset;  in ucall_init() local
    32    gpa = (vm_paddr_t)arg;  in ucall_init()
    33    ret = ucall_mmio_init(vm, gpa);  in ucall_init()
    34    TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);  in ucall_init()
    [all ...] |
/kernel/linux/linux-6.6/arch/s390/kvm/ |
H A D | gaccess.h |
    152   unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in write_guest_lc() local
    154   return kvm_write_guest(vcpu->kvm, gpa, data, len);  in write_guest_lc()
    178   unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in read_guest_lc() local
    180   return kvm_read_guest(vcpu->kvm, gpa, data, len);  in read_guest_lc()
    190   unsigned long *gpa, enum gacc_mode mode,
    196   int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
    199   int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
    209   int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
    357   * @gpa: guest physical (absolute) address
    361   * Copy @len bytes from @data (kernel space) to @gpa (guest ...
    371   write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, unsigned long len)  argument
    394   read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, unsigned long len)  argument
    [all ...] |
H A D | vsie.c |
    652   * Pin the guest page given by gpa and set hpa to the pinned host address.
    656   * - -EINVAL if the gpa is not valid guest storage
    658   static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)  in pin_guest_page() argument
    662   page = gfn_to_page(kvm, gpa_to_gfn(gpa));  in pin_guest_page()
    665   *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);  in pin_guest_page()
    670   static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)  in unpin_guest_page() argument
    674   mark_page_dirty(kvm, gpa_to_gfn(gpa));  in unpin_guest_page()
    739   gpa_t gpa;  in pin_blocks() local
    742   gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;  in pin_blocks()
    744   gpa | ...  in pin_blocks()
    852   unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, gpa_t gpa)  argument
    868   pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, gpa_t gpa)  argument
    [all ...] |
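pin_guest_page() above illustrates the common offset-preserving translation: resolve the page that backs the gpa, then add back the bits below the page boundary (gpa & ~PAGE_MASK) to get the exact host address. The following is a tiny standalone sketch of only that arithmetic; the page size and the fake frame-placement function are assumptions standing in for gfn_to_page()/page_to_phys().

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ull << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Made-up stand-in for gfn_to_page()/page_to_phys(): pretend every guest
     * frame is backed contiguously starting at 4 GiB in host physical space. */
    static uint64_t gfn_to_hpa_base(uint64_t gfn)
    {
        return 0x100000000ull + (gfn << PAGE_SHIFT);
    }

    /* Offset-preserving translation: host frame of the gpa, plus the in-page offset. */
    static uint64_t gpa_to_hpa(uint64_t gpa)
    {
        uint64_t gfn = gpa >> PAGE_SHIFT;                   /* gpa_to_gfn() */
        return gfn_to_hpa_base(gfn) + (gpa & ~PAGE_MASK);   /* keep the offset */
    }

    int main(void)
    {
        /* gpa 0x2123 maps to host base + 0x2123 in this toy model */
        return gpa_to_hpa(0x2123) == 0x100000000ull + 0x2123 ? 0 : 1;
    }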
H A D | gaccess.c |
    606   static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)  in deref_table() argument
    608   return kvm_read_guest(kvm, gpa, val, sizeof(*val));  in deref_table()
    615   * @gpa: points to where guest physical (absolute) address should be stored
    623   * an addressing exception is indicated and @gpa will not be changed.
    625   * Returns: - zero on success; @gpa contains the resulting absolute address
    633   unsigned long *gpa, const union asce asce,  in guest_translate()
    792   *gpa = raddr.addr;  in guest_translate()
    816   enum gacc_mode mode, gpa_t gpa)  in vm_check_access_key()
    826   hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));  in vm_check_access_key()
    879   enum gacc_mode mode, union asce asce, gpa_t gpa,  in vcpu_check_access_key()
    632   guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, unsigned long *gpa, const union asce asce, enum gacc_mode mode, enum prot_type *prot)  argument
    815   vm_check_access_key(struct kvm *kvm, u8 access_key, enum gacc_mode mode, gpa_t gpa)  argument
    878   vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key, enum gacc_mode mode, union asce asce, gpa_t gpa, unsigned long ga, unsigned int len)  argument
    962   unsigned long gpa;  in guest_range_to_gpas() local
    997   access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, void *data, unsigned int len)  argument
    1012  access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, void *data, unsigned int len, u8 access_key)  argument
    1045  access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len, enum gacc_mode mode, u8 access_key)  argument
    1150  unsigned long gpa;  in access_guest_real() local
    1188  cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old_addr, __uint128_t new, u8 access_key, bool *success)  argument
    1288  guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long *gpa, enum gacc_mode mode, u8 access_key)  argument
    1337  check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length, enum gacc_mode mode, u8 access_key)  argument
    [all ...] |
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/ |
H A D | memstress.c |
    109   vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);  in memstress_setup_vcpus()
    114   vcpu_args->gpa = args->gpa;  in memstress_setup_vcpus()
    119   pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",  in memstress_setup_vcpus()
    120   i, vcpu_args->gpa, vcpu_args->gpa +  in memstress_setup_vcpus()
    197   args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;  in memstress_create_vm()
    198   args->gpa = align_down(args->gpa, backing_src_pages ...  in memstress_create_vm()
    [all ...] |
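memstress_setup_vcpus() above hands each vCPU either its own slice of the test region (gpa = base + i * vcpu_memory_bytes) or, when memory is shared, the same base gpa for all of them. Below is a small sketch of the partitioned case; the struct and function names are illustrative, not the selftest's.

    #include <stdint.h>
    #include <stdio.h>

    struct vcpu_args {
        uint64_t gpa;     /* start of this vCPU's slice */
        uint64_t bytes;   /* size of the slice */
    };

    /* Carve a contiguous guest region into per-vCPU slices, mirroring the
     * non-shared branch of memstress_setup_vcpus(). */
    static void setup_vcpu_ranges(struct vcpu_args *args, int nr_vcpus,
                                  uint64_t base_gpa, uint64_t vcpu_memory_bytes)
    {
        for (int i = 0; i < nr_vcpus; i++) {
            args[i].gpa   = base_gpa + (uint64_t)i * vcpu_memory_bytes;
            args[i].bytes = vcpu_memory_bytes;
            printf("vCPU %d test mem gpa [%llx, %llx)\n", i,
                   (unsigned long long)args[i].gpa,
                   (unsigned long long)(args[i].gpa + args[i].bytes));
        }
    }

    int main(void)
    {
        struct vcpu_args args[4];
        setup_vcpu_ranges(args, 4, 0xc0000000ull, 64ull << 20);   /* 64 MiB per vCPU */
        return 0;
    }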
/kernel/linux/linux-5.10/arch/s390/kvm/ |
H A D | gaccess.h |
    152   unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in write_guest_lc() local
    154   return kvm_write_guest(vcpu->kvm, gpa, data, len);  in write_guest_lc()
    178   unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in read_guest_lc() local
    180   return kvm_read_guest(vcpu->kvm, gpa, data, len);  in read_guest_lc()
    190   u8 ar, unsigned long *gpa, enum gacc_mode mode);
    296   * @gpa: guest physical (absolute) address
    300   * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
    310   int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,  in write_guest_abs() argument
    313   return kvm_write_guest(vcpu->kvm, gpa, data, len);  in write_guest_abs()
    319   * @gpa ...
    333   read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, unsigned long len)  argument
    [all ...] |
H A D | vsie.c |
    637   * Pin the guest page given by gpa and set hpa to the pinned host address.
    641   * - -EINVAL if the gpa is not valid guest storage
    643   static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)  in pin_guest_page() argument
    647   page = gfn_to_page(kvm, gpa_to_gfn(gpa));  in pin_guest_page()
    650   *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);  in pin_guest_page()
    655   static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)  in unpin_guest_page() argument
    659   mark_page_dirty(kvm, gpa_to_gfn(gpa));  in unpin_guest_page()
    724   gpa_t gpa;  in pin_blocks() local
    727   gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;  in pin_blocks()
    729   gpa | ...  in pin_blocks()
    837   unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, gpa_t gpa)  argument
    853   pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, gpa_t gpa)  argument
    [all ...] |
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/ |
H A D | page_track.h |
    30    void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
    41    static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,  in __kvm_page_track_write() argument
    50    static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,  in kvm_page_track_write() argument
    53    __kvm_page_track_write(vcpu->kvm, gpa, new, bytes);  in kvm_page_track_write()
    55    kvm_mmu_track_write(vcpu, gpa, new, bytes);  in kvm_page_track_write()
|
/kernel/linux/linux-6.6/arch/riscv/kvm/ |
H A D | tlb.c |
    21    gpa_t gpa, gpa_t gpsz,  in kvm_riscv_local_hfence_gvma_vmid_gpa()
    33    for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_vmid_gpa()
    38    for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_vmid_gpa()
    49    void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,  in kvm_riscv_local_hfence_gvma_gpa() argument
    61    for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_gpa()
    66    for (pos = gpa; pos < (gpa ...  in kvm_riscv_local_hfence_gvma_gpa()
    20    kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, gpa_t gpa, gpa_t gpsz, unsigned long order)  argument
    332   kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm, unsigned long hbase, unsigned long hmask, gpa_t gpa, gpa_t gpsz, unsigned long order)  argument
    [all ...] |
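The loops shown in kvm_riscv_local_hfence_gvma_vmid_gpa() walk the [gpa, gpa + gpsz) range in BIT(order) strides and issue one fence per step. A standalone sketch of that iteration is below; the flush itself is stubbed out with a print, since the real code emits an HFENCE.GVMA instruction per address.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1ull << (n))

    /* Walk a guest-physical range in power-of-two strides, one flush per step,
     * mirroring the loop structure of the riscv g-stage TLB helpers. */
    static void hfence_gvma_gpa_range(uint64_t gpa, uint64_t gpsz, unsigned long order)
    {
        for (uint64_t pos = gpa; pos < gpa + gpsz; pos += BIT(order))
            printf("flush guest-physical page at 0x%llx (stride 2^%lu)\n",
                   (unsigned long long)pos, order);    /* stands in for HFENCE.GVMA */
    }

    int main(void)
    {
        /* flush 16 KiB of guest-physical space in 4 KiB steps */
        hfence_gvma_gpa_range(0x80000000ull, 0x4000, 12);
        return 0;
    }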
H A D | mmu.c |
    179   gpa_t gpa, phys_addr_t hpa,  in gstage_map_page()
    219   return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);  in gstage_map_page()
    346   int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,  in kvm_riscv_gstage_ioremap() argument
    359   end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;  in kvm_riscv_gstage_ioremap()
    362   for (addr = gpa; addr < end; addr += PAGE_SIZE) {  in kvm_riscv_gstage_ioremap()
    386   void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)  in kvm_riscv_gstage_iounmap() argument
    389   gstage_unmap_range(kvm, gpa, size, false);  in kvm_riscv_gstage_iounmap()
    425   gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  in kvm_arch_flush_shadow_memslot() local
    429   gstage_unmap_range(kvm, gpa, size, false);  in kvm_arch_flush_shadow_memslot()
    510   gpa_t gpa ...  in kvm_arch_prepare_memory_region() local
    177   gstage_map_page(struct kvm *kvm, struct kvm_mmu_memory_cache *pcache, gpa_t gpa, phys_addr_t hpa, unsigned long page_size, bool page_rdonly, bool page_exec)  argument
    609   kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write)  argument
    [all ...] |
/kernel/linux/linux-6.6/virt/kvm/ |
H A D | pfncache.c |
    86    if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)  in kvm_gpc_check()
    153   * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva  in hva_to_pfn_retry()
    155   * different task may not fail the gpa/uhva/generation checks.  in hva_to_pfn_retry()
    222   gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);  in hva_to_pfn_retry()
    239   static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,  in __kvm_gpc_refresh() argument
    243   unsigned long page_offset = gpa & ~PAGE_MASK;  in __kvm_gpc_refresh()
    260   * gpa, memslots generation, etc..., so they must be fully serialized.  in __kvm_gpc_refresh()
    276   if (gpc->gpa != gpa || gpc->generation != slots->generation ||  in __kvm_gpc_refresh()
    278   gfn_t gfn = gpa_to_gfn(gpa);  in __kvm_gpc_refresh()
    357   kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)  argument
    [all ...] |
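The kvm_gpc_check() hit at line 86 encodes the pfncache's one-page limit: the cached mapping covers a single guest page, so a request is usable only if the offset of the gpa within its page plus the requested length still fits in PAGE_SIZE. A tiny sketch of that test, with an assumed 4 KiB page size:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ull
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* A gfn_to_pfn_cache maps exactly one guest page, so an access of 'len'
     * bytes starting at 'gpa' must not cross into the next page — the same
     * condition checked in kvm_gpc_check(). */
    static bool gpc_range_fits(uint64_t gpa, unsigned long len)
    {
        return (gpa & ~PAGE_MASK) + len <= PAGE_SIZE;
    }

    int main(void)
    {
        /* 8 bytes at offset 0xff8 fit; 16 bytes at the same offset cross the page */
        return (gpc_range_fits(0x1ff8, 8) && !gpc_range_fits(0x1ff8, 16)) ? 0 : 1;
    }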
/kernel/linux/linux-5.10/arch/x86/include/asm/uv/ |
H A D | uv_hub.h |
    382   /* global bits offset - number of local address bits in gpa for this UV arch */
    460   uv_gpa_in_mmr_space(unsigned long gpa)  in uv_gpa_in_mmr_space() argument
    462   return (gpa >> 62) == 0x3UL;  in uv_gpa_in_mmr_space()
    466   static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)  in uv_gpa_to_soc_phys_ram() argument
    474   gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |  in uv_gpa_to_soc_phys_ram()
    475   ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);  in uv_gpa_to_soc_phys_ram()
    477   paddr = gpa & uv_hub_info->gpa_mask;  in uv_gpa_to_soc_phys_ram()
    483   /* gpa -> gnode */
    484   static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)  in uv_gpa_to_gnode() argument
    495   uv_gpa_to_pnode(unsigned long gpa)  argument
    501   uv_gpa_to_offset(unsigned long gpa)  argument
    [all ...] |
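uv_gpa_to_soc_phys_ram() above undoes the UV global-address encoding: it keeps the node-local offset bits of the gpa, moves the node number from its gpa position (n_lshift) down to where the socket physical address expects it (m_val), and finally applies gpa_mask. The sketch below repeats that expression with made-up field widths; on real hardware m_val, n_lshift, and gpa_mask come from uv_hub_info at runtime and differ per platform.

    #include <stdint.h>

    /* Illustrative field widths only — assumed values, not any real UV layout. */
    #define M_VAL     37ull                 /* node-local address bits in the socket paddr */
    #define M_SHIFT   (64ull - M_VAL)       /* shift pair that isolates the low M_VAL bits */
    #define N_LSHIFT  39ull                 /* bit position of the node number in the gpa */
    #define GPA_MASK  ((1ull << 46) - 1)    /* valid physical address bits */

    /* Rebuild a socket-local physical RAM address from a UV global address. */
    static uint64_t gpa_to_soc_phys_ram(uint64_t gpa)
    {
        gpa = ((gpa << M_SHIFT) >> M_SHIFT) |      /* keep the node-local offset */
              ((gpa >> N_LSHIFT) << M_VAL);        /* re-place the node number */
        return gpa & GPA_MASK;
    }

    int main(void)
    {
        /* node 5, offset 0x1234: the node id moves from bit 39 down to bit 37 */
        uint64_t gpa = (5ull << N_LSHIFT) | 0x1234;
        return gpa_to_soc_phys_ram(gpa) == ((5ull << M_VAL) | 0x1234) ? 0 : 1;
    }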
/kernel/linux/linux-6.6/arch/x86/include/asm/uv/ |
H A D | uv_hub.h |
    383   /* global bits offset - number of local address bits in gpa for this UV arch */
    461   uv_gpa_in_mmr_space(unsigned long gpa)  in uv_gpa_in_mmr_space() argument
    463   return (gpa >> 62) == 0x3UL;  in uv_gpa_in_mmr_space()
    467   static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)  in uv_gpa_to_soc_phys_ram() argument
    475   gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |  in uv_gpa_to_soc_phys_ram()
    476   ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);  in uv_gpa_to_soc_phys_ram()
    478   paddr = gpa & uv_hub_info->gpa_mask;  in uv_gpa_to_soc_phys_ram()
    484   /* gpa -> gnode */
    485   static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)  in uv_gpa_to_gnode() argument
    496   uv_gpa_to_pnode(unsigned long gpa)  argument
    502   uv_gpa_to_offset(unsigned long gpa)  argument
    [all ...] |
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/ |
H A D | page_track.c |
    152   * @gpa: the gpa of this write
    159   int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,  in intel_vgpu_page_track_handler() argument
    167   page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);  in intel_vgpu_page_track_handler()
    175   intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);  in intel_vgpu_page_track_handler()
    177   ret = page_track->handler(page_track, gpa, data, bytes);  in intel_vgpu_page_track_handler()
    179   gvt_err("guest page write error, gpa %llx\n", gpa);  in intel_vgpu_page_track_handler()
|
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/ |
H A D | page_track.c |
    152   * @gpa: the gpa of this write
    159   int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,  in intel_vgpu_page_track_handler() argument
    165   page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);  in intel_vgpu_page_track_handler()
    171   intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);  in intel_vgpu_page_track_handler()
    173   ret = page_track->handler(page_track, gpa, data, bytes);  in intel_vgpu_page_track_handler()
    175   gvt_err("guest page write error, gpa %llx\n", gpa);  in intel_vgpu_page_track_handler()
|
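Both versions of intel_vgpu_page_track_handler() follow the same shape: convert the written gpa to a guest frame number (gpa >> PAGE_SHIFT), look up the tracker registered for that frame, and dispatch the write to its handler. The sketch below models that dispatch with an invented fixed-size tracker table; the real driver keeps trackers in per-vGPU data structures, so everything here beyond the gfn lookup idea is an assumption.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MAX_TRACKS 16   /* invented capacity for the sketch */

    struct page_track {
        uint64_t gfn;                                   /* tracked guest frame */
        int (*handler)(uint64_t gpa, const void *data, int bytes);
    };

    static struct page_track tracks[MAX_TRACKS];
    static int nr_tracks;

    /* Dispatch a guest write to the tracker covering its page, if any —
     * the same gpa >> PAGE_SHIFT lookup as in the gvt page-track handler. */
    static int page_track_write(uint64_t gpa, const void *data, int bytes)
    {
        for (int i = 0; i < nr_tracks; i++) {
            if (tracks[i].gfn == gpa >> PAGE_SHIFT)
                return tracks[i].handler(gpa, data, bytes);
        }
        return 0;   /* page not tracked: nothing to do */
    }

    static int log_write(uint64_t gpa, const void *data, int bytes)
    {
        (void)data;
        printf("tracked write: gpa 0x%llx, %d bytes\n", (unsigned long long)gpa, bytes);
        return 0;
    }

    int main(void)
    {
        tracks[nr_tracks++] = (struct page_track){ .gfn = 0x1000, .handler = log_write };
        uint32_t val = 0xdeadbeef;
        return page_track_write((0x1000ull << PAGE_SHIFT) + 0x40, &val, sizeof(val));
    }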
/kernel/linux/linux-5.10/arch/arm64/kvm/ |
H A D | hypercalls.c |
    17    gpa_t gpa;  in kvm_hvc_call_handler() local
    82    gpa = kvm_init_stolen_time(vcpu);  in kvm_hvc_call_handler()
    83    if (gpa != GPA_INVALID)  in kvm_hvc_call_handler()
    84    val = gpa;  in kvm_hvc_call_handler()
|
/kernel/linux/linux-6.6/arch/x86/kvm/ |
H A D | cpuid.h |
    45    static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)  in kvm_vcpu_is_legal_gpa() argument
    47    return !(gpa & vcpu->arch.reserved_gpa_bits);  in kvm_vcpu_is_legal_gpa()
    50    static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)  in kvm_vcpu_is_illegal_gpa() argument
    52    return !kvm_vcpu_is_legal_gpa(vcpu, gpa);  in kvm_vcpu_is_illegal_gpa()
    56    gpa_t gpa, gpa_t alignment)  in kvm_vcpu_is_legal_aligned_gpa()
    58    return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);  in kvm_vcpu_is_legal_aligned_gpa()
    61    static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)  in page_address_valid() argument
    63    return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);  in page_address_valid()
    55    kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment)  argument
|
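The cpuid.h helpers above treat a gpa as legal when none of the bits above the guest's physical-address width are set; reserved_gpa_bits is essentially the complement of the MAXPHYADDR mask, and page_address_valid() adds a page-alignment requirement on top. A condensed sketch of both checks follows; the 48-bit MAXPHYADDR is an assumed example value, not something the header fixes.

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ull

    /* With an assumed guest MAXPHYADDR of 48 bits, every bit at or above
     * bit 48 is reserved — the same idea as vcpu->arch.reserved_gpa_bits. */
    static const uint64_t reserved_gpa_bits = ~((1ull << 48) - 1);

    static bool gpa_is_legal(uint64_t gpa)
    {
        return !(gpa & reserved_gpa_bits);
    }

    static bool gpa_is_legal_aligned(uint64_t gpa, uint64_t alignment)
    {
        return (gpa % alignment) == 0 && gpa_is_legal(gpa);   /* IS_ALIGNED() equivalent */
    }

    static bool page_addr_valid(uint64_t gpa)
    {
        return gpa_is_legal_aligned(gpa, PAGE_SIZE);
    }

    int main(void)
    {
        /* a page-aligned gpa under 2^48 passes; one with bit 52 set does not */
        return (page_addr_valid(0x123456000ull) && !gpa_is_legal(1ull << 52)) ? 0 : 1;
    }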
/kernel/linux/linux-6.6/arch/mips/kvm/ |
H A D | mmu.c |
    449   gpa_t gpa = range->start << PAGE_SHIFT;  in kvm_set_spte_gfn() local
    451   pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in kvm_set_spte_gfn()
    484   gpa_t gpa = range->start << PAGE_SHIFT;  in kvm_test_age_gfn() local
    485   pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in kvm_test_age_gfn()
    495   * @gpa: Guest physical address of fault.
    497   * @out_entry: New PTE for @gpa (written on success unless NULL).
    498   * @out_buddy: New PTE for @gpa's buddy (written on success unless ...
    510   static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,  in _kvm_mips_map_page_fast() argument
    515   gfn_t gfn = gpa >> PAGE_SHIFT;  in _kvm_mips_map_page_fast()
    524   ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in _kvm_mips_map_page_fast()
    586   kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write_fault, pte_t *out_entry, pte_t *out_buddy)  argument
    [all ...] |
/kernel/linux/linux-6.6/arch/riscv/include/asm/ |
H A D | kvm_host.h |
    257   gpa_t gpa, gpa_t gpsz,
    260   void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
    286   gpa_t gpa, gpa_t gpsz,
    304   int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
    307   void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
    311   gpa_t gpa, unsigned long hva, bool is_write);
|