Lines matching refs: hva
406 hva_t hva = memslot->userspace_addr;
409 hva_t reg_end = hva + size;
424 struct vm_area_struct *vma = find_vma(current->mm, hva);
433 vm_start = max(hva, vma->vm_start);
440 hva = vm_end;
441 } while (hva < reg_end);
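
The hits above appear to come from KVM/arm's stage 2 MMU code. Taken together, lines 406-441 outline a do/while walk over every VMA intersecting the memslot's userspace range [hva, hva + size). A minimal sketch of that shape, assuming the usual clamp of each VMA to the overlapping part (only the quoted lines are verbatim; the vm_end clamp, the exit test, and the loop body are assumptions):

    hva_t hva = memslot->userspace_addr;
    hva_t reg_end = hva + size;

    do {
        struct vm_area_struct *vma = find_vma(current->mm, hva);
        hva_t vm_start, vm_end;

        /* find_vma() returns the first VMA ending above hva; if it
         * starts at or past reg_end, nothing else overlaps the slot. */
        if (!vma || vma->vm_start >= reg_end)
            break;

        /* Clamp the VMA to the part that overlaps the memslot. */
        vm_start = max(hva, vma->vm_start);
        vm_end = min(reg_end, vma->vm_end);    /* assumed clamp */

        /* ... act on [vm_start, vm_end) ... */

        hva = vm_end;    /* resume the walk at the next VMA */
    } while (hva < reg_end);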
628 unsigned long hva,
684 return (hva & ~(map_size - 1)) >= uaddr_start &&
685 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
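
The check on lines 684-685 rounds hva down to a map_size boundary and requires both ends of the resulting block to sit inside the memslot's userspace window. A self-contained user-space demonstration of the same arithmetic (the helper name block_fits and the 2 MiB value for PMD_SIZE, which assumes 4K pages, are illustrative, not from the source):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same arithmetic as lines 684-685: a map_size block mapping is only
     * safe when the map_size-aligned block containing hva lies entirely
     * inside the memslot's userspace window [uaddr_start, uaddr_end). */
    static bool block_fits(unsigned long hva, unsigned long map_size,
                           unsigned long uaddr_start, unsigned long uaddr_end)
    {
        unsigned long block = hva & ~(map_size - 1);   /* round down */

        return block >= uaddr_start && block + map_size <= uaddr_end;
    }

    int main(void)
    {
        unsigned long pmd = 2UL << 20;   /* 2 MiB: PMD_SIZE with 4K pages */

        /* Slot covers [4M, 8M): the 2 MiB block at 4M fits entirely. */
        printf("%d\n", block_fits(0x400000, pmd, 0x400000, 0x800000)); /* 1 */
        /* Slot covers [5M, 8M): 5M rounds down to 4M, outside the slot. */
        printf("%d\n", block_fits(0x500000, pmd, 0x500000, 0x800000)); /* 0 */
        return 0;
    }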
689 * Check if the given hva is backed by a transparent huge page (THP) and
698 unsigned long hva, kvm_pfn_t *pfnp,
709 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
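
From the fragments at lines 689-709, the THP adjustment appears to work as follows: if the page backing hva is part of a transparent huge page and the alignment check above passes for PMD_SIZE, the caller's pfn and IPA are aligned down so stage 2 can install a 2 MiB block mapping. A hedged sketch; the THP test helper and everything not quoted above are assumptions, and pfn reference-count fixups are omitted:

    static unsigned long
    transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
                                unsigned long hva, kvm_pfn_t *pfnp,
                                phys_addr_t *ipap)
    {
        kvm_pfn_t pfn = *pfnp;

        if (kvm_is_transparent_hugepage(pfn) &&   /* assumed THP test */
            fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
            /* Align the IPA and the pfn down to the 2 MiB boundary so
             * a PMD block mapping can be installed at stage 2. */
            *ipap &= PMD_MASK;
            *pfnp = pfn & ~(PTRS_PER_PMD - 1);
            return PMD_SIZE;
        }

        /* Fall back to mapping a single page. */
        return PAGE_SIZE;
    }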
742 struct kvm_memory_slot *memslot, unsigned long hva,
774 vma = find_vma_intersection(current->mm, hva, hva + 1);
776 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
795 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
803 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
850 kvm_send_hwpoison_signal(hva, vma_shift);
880 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
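
Lines 742-880 belong to the user-mem-abort path: look up the VMA for hva and treat a missing VMA as an error (lines 774-776), then pick the largest mapping granularity the memslot layout permits (lines 795 and 803). A hedged sketch of that downgrade cascade; the switch structure is an assumed reconstruction built around the two quoted checks:

    switch (vma_shift) {
    case PUD_SHIFT:
        if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
            break;                  /* a 1 GiB block is safe */
        vma_shift = PMD_SHIFT;
        fallthrough;
    case PMD_SHIFT:
        if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
            break;                  /* a 2 MiB block is safe */
        vma_shift = PAGE_SHIFT;
        fallthrough;
    case PAGE_SHIFT:
        break;                      /* map a single page */
    }
    vma_pagesize = 1UL << vma_shift;

Line 850 additionally shows that a hwpoisoned backing page is reported to userspace via kvm_send_hwpoison_signal() sized by the same vma_shift, and line 880 that a PAGE_SIZE result may still be promoted back to a block by the THP adjustment sketched above.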
956 unsigned long hva;
995 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
997 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1025 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1051 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
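
Lines 956-1051 are from the guest-abort handler that routes a stage 2 fault. The quoted lines give the decision order: translate the faulting gfn to an hva (line 995); if that fails, or a write hits a non-writable slot, the access is not ordinary RAM (line 997); a failed translation caused by a cache-maintenance instruction is simply skipped (line 1025); everything else lands in user_mem_abort() (line 1051). A hedged sketch with the elided branches marked as comments:

    hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
    if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
        if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
            /* Cache maintenance on an address no memslot backs has
             * nothing to operate on: skip the faulting instruction
             * and return to the guest (skip helper elided). */
            ret = 1;
            goto out_unlock;
        }
        /* Otherwise the access is handed to MMIO emulation, which
         * sets ret itself (path elided). */
        goto out_unlock;
    }

    /* Ordinary RAM fault: resolve it through the stage 2 tables. */
    ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);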
1133 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1135 unsigned long end = hva + PAGE_SIZE;
1141 trace_kvm_set_spte_hva(hva);
1148 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
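
Lines 1133-1148 nearly spell out the whole function: the change_pte MMU notifier covers exactly one page, so the range handed to handle_hva_to_gpa() is [hva, hva + PAGE_SIZE). A hedged reconstruction; the pgd guard and the pte_pfn() extraction are assumptions, the rest is quoted:

    int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
    {
        unsigned long end = hva + PAGE_SIZE;
        kvm_pfn_t pfn = pte_pfn(pte);

        if (!kvm->arch.pgd)     /* stage 2 tables not allocated yet */
            return 0;

        trace_kvm_set_spte_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
        return 0;
    }

kvm_test_age_hva() at lines 1177-1182 uses the same single-page pattern, passing hva + PAGE_SIZE directly as the range end.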
1177 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1181 trace_kvm_test_age_hva(hva);
1182 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
1299 hva_t hva = mem->userspace_addr;
1300 hva_t reg_end = hva + mem->memory_size;
1329 struct vm_area_struct *vma = find_vma(current->mm, hva);
1338 vm_start = max(hva, vma->vm_start);
1361 hva = vm_end;
1362 } while (hva < reg_end);
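
Lines 1299-1362 repeat the clamp-and-advance VMA walk sketched above for lines 406-441, this time over the range a memslot update describes (mem->userspace_addr through mem->userspace_addr + mem->memory_size) rather than an existing memslot.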