Lines matching refs: hva

944 hva_t hva = memslot->userspace_addr;
947 hva_t reg_end = hva + size;
965 vma = find_vma_intersection(current->mm, hva, reg_end);
972 vm_start = max(hva, vma->vm_start);
979 hva = vm_end;
980 } while (hva < reg_end);
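The walk at 965-980 visits every VMA intersecting the memslot's userspace range [hva, reg_end): each chunk is clamped to the VMA with max()/min(), and the cursor then jumps to the chunk's end. Below is a minimal userspace sketch of that pattern; the toy vma_t list and lookup are illustrative stand-ins for current->mm and the real find_vma_intersection(), not the kernel code itself.

#include <stdio.h>

typedef unsigned long hva_t;

typedef struct { hva_t vm_start, vm_end; } vma_t;	/* [vm_start, vm_end) */

/* Toy stand-in for find_vma_intersection(): first VMA overlapping [start, end). */
static vma_t *find_vma_intersection(vma_t *vmas, int n, hva_t start, hva_t end)
{
	for (int i = 0; i < n; i++)
		if (vmas[i].vm_start < end && start < vmas[i].vm_end)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	vma_t vmas[] = { { 0x1000, 0x4000 }, { 0x6000, 0x9000 } };
	hva_t hva = 0x2000, reg_end = 0x8000;	/* memslot range, as at 944/947 */

	do {
		vma_t *vma = find_vma_intersection(vmas, 2, hva, reg_end);
		if (!vma)
			break;		/* hole in the address space: stop */

		/* clamp the chunk to this VMA, as at 972 */
		hva_t vm_start = hva > vma->vm_start ? hva : vma->vm_start;
		hva_t vm_end = reg_end < vma->vm_end ? reg_end : vma->vm_end;

		printf("chunk [%#lx, %#lx)\n", vm_start, vm_end);
		hva = vm_end;		/* advance past this chunk, as at 979 */
	} while (hva < reg_end);
	return 0;
}

Note how the gap between the two toy VMAs (0x4000-0x6000) is simply skipped: the next lookup finds the next intersecting VMA.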
1212 unsigned long hva,
1268 return (hva & ~(map_size - 1)) >= uaddr_start &&
1269 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
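The return at 1268-1269 is the heart of fault_supports_stage2_huge_mapping(): align hva down to a map_size boundary and accept only if the whole block still fits inside the memslot's userspace range. A minimal sketch, assuming map_size is a power of two (PMD_SIZE or PUD_SIZE in the callers shown above); the helper name and addresses are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long hva_t;

static bool block_fits_memslot(hva_t uaddr_start, hva_t uaddr_end,
			       hva_t hva, hva_t map_size)
{
	hva_t block = hva & ~(map_size - 1);	/* align down to the block */

	return block >= uaddr_start && block + map_size <= uaddr_end;
}

int main(void)
{
	/* 2 MiB block fully inside a 4 MiB slot: OK. */
	printf("%d\n", block_fits_memslot(0x40000000, 0x40400000,
					  0x40280000, 0x200000));
	/* Same hva, but the slot ends mid-block: rejected. */
	printf("%d\n", block_fits_memslot(0x40000000, 0x40300000,
					  0x40280000, 0x200000));
	return 0;
}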
1273 * Check if the given hva is backed by a transparent huge page (THP) and
1282 unsigned long hva, kvm_pfn_t *pfnp,
1292 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1293 int sz = get_user_mapping_size(kvm, hva);
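At 1292-1293 the THP path first asks whether a PMD-sized stage-2 block is even permitted for this hva, then queries how large the host-side mapping really is. A hedged sketch of the adjustment this implies, assuming the usual transparent_hugepage_adjust() shape (when the host maps at least 2 MiB, align the IPA and pfn down together so a single block entry backs the fault); the standalone types are illustrative.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PMD	(1UL << (PMD_SHIFT - PAGE_SHIFT))

typedef unsigned long u64;

/* host_map_size stands in for get_user_mapping_size(kvm, hva). */
static u64 adjust_to_thp(u64 host_map_size, u64 *pfnp, u64 *ipap)
{
	if (host_map_size < PMD_SIZE)
		return 1UL << PAGE_SHIFT;	/* fall back to a single page */

	*ipap &= PMD_MASK;			/* align IPA to 2 MiB */
	*pfnp &= ~(PTRS_PER_PMD - 1);		/* align pfn the same way */
	return PMD_SIZE;
}

int main(void)
{
	u64 pfn = 0x40283, ipa = 0x80283000;

	u64 sz = adjust_to_thp(PMD_SIZE, &pfn, &ipa);
	printf("map %#lx bytes: pfn=%#lx ipa=%#lx\n", sz, pfn, ipa);
	return 0;
}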
1332 static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
1344 pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
1347 if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
1348 ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
1349 ALIGN(hva, PUD_SIZE) <= vma->vm_end)
1353 if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1354 ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1355 ALIGN(hva, PMD_SIZE) <= vma->vm_end)
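get_vma_page_shift() (1332-1355) only grants a block shift when hva and the backing physical address are congruent modulo the block size and the aligned block stays inside the VMA; pa at 1344 reconstructs the physical address from vm_pgoff. A self-contained sketch of that decision, first trying PUD, then PMD, with made-up setup values:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

struct vma { unsigned long vm_start, vm_end; };

static int vma_page_shift(struct vma *vma, unsigned long hva, unsigned long pa)
{
	/* same offset within 1 GiB, and the 1 GiB block fits the VMA? */
	if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PUD_SIZE) <= vma->vm_end)
		return PUD_SHIFT;

	/* otherwise, the same test at 2 MiB granularity */
	if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
		return PMD_SHIFT;

	return PAGE_SHIFT;
}

int main(void)
{
	struct vma vma = { 0x40000000, 0x80000000 };	/* 1 GiB-aligned VMA */

	/* hva and pa congruent modulo 1 GiB: PUD-sized mapping is fine. */
	printf("shift=%d\n", vma_page_shift(&vma, 0x40123000, 0x80123000));
	/* congruent only modulo 2 MiB: degrade to PMD. */
	printf("shift=%d\n", vma_page_shift(&vma, 0x40123000, 0x80323000));
	return 0;
}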
1395 struct kvm_memory_slot *memslot, unsigned long hva,
1444 vma = vma_lookup(current->mm, hva);
1446 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1459 vma_shift = get_vma_page_shift(vma, hva);
1465 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1473 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1510 kvm_send_hwpoison_signal(hva, vma_shift);
1554 hva, &pfn,
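The user_mem_abort() fragments at 1444-1473 suggest the ladder: derive a candidate shift from the VMA (1459), then re-check it against the memslot layout (1465, 1473) and downgrade PUD -> PMD -> page when that block size cannot be supported. A hedged sketch of the downgrade, with a stubbed-out supports_huge() in place of fault_supports_stage2_huge_mapping(); all of it is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30

static bool supports_huge(int shift)
{
	return shift == PMD_SHIFT;	/* pretend only 2 MiB blocks fit */
}

static int pick_vma_shift(int vma_shift)
{
	switch (vma_shift) {
	case PUD_SHIFT:
		if (supports_huge(PUD_SHIFT))
			break;
		/* fall through: try the next smaller block size */
	case PMD_SHIFT:
		if (supports_huge(PMD_SHIFT)) {
			vma_shift = PMD_SHIFT;
			break;
		}
		/* fall through */
	default:
		vma_shift = PAGE_SHIFT;	/* last resort: single pages */
	}
	return vma_shift;
}

int main(void)
{
	printf("PUD request -> shift %d\n", pick_vma_shift(PUD_SHIFT));
	return 0;
}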
1643 unsigned long hva;
1702 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1704 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1732 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1758 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
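At 1702 the abort handler turns the faulting gfn into an hva through the memslot, and 1704/1732 handle the error-hva cases (1732 letting cache-maintenance aborts on unmapped addresses through). A sketch of the translation itself, assuming KVM's usual memslot layout (userspace base plus the gfn's page offset into the slot); the error sentinel is an illustrative stand-in for KVM's error-hva encoding.

#include <stdio.h>

#define PAGE_SHIFT	12
#define ERR_HVA		(~0UL)

struct memslot {
	unsigned long base_gfn;		/* first guest frame in the slot */
	unsigned long npages;
	unsigned long userspace_addr;	/* hva backing base_gfn */
};

static unsigned long gfn_to_hva(const struct memslot *slot, unsigned long gfn)
{
	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return ERR_HVA;		/* gfn not covered by this slot */

	return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { 0x80000, 0x400, 0x7f0000000000 };

	printf("hva=%#lx\n", gfn_to_hva(&slot, 0x80010));	/* inside */
	printf("hva=%#lx\n", gfn_to_hva(&slot, 0x90000));	/* outside */
	return 0;
}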
2013 hva_t hva, reg_end;
2027 hva = new->userspace_addr;
2028 reg_end = hva + (new->npages << PAGE_SHIFT);
2045 vma = find_vma_intersection(current->mm, hva, reg_end);
2061 hva = min(reg_end, vma->vm_end);
2062 } while (hva < reg_end);
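The registration-time walk at 2045-2062 mirrors the one at 965-980 but advances with min(reg_end, vm_end) (2061) and stops at the first gap. A hedged sketch of validating each intersecting VMA; the VM_PFNMAP vs. dirty-logging rule is my assumption about what this loop checks, not something shown in the matches above.

#include <stdbool.h>
#include <stdio.h>

#define VM_PFNMAP	0x1
#define LOG_DIRTY	0x1

struct vma { unsigned long vm_start, vm_end, vm_flags; };

static struct vma *intersect(struct vma *v, int n,
			     unsigned long start, unsigned long end)
{
	for (int i = 0; i < n; i++)
		if (v[i].vm_start < end && start < v[i].vm_end)
			return &v[i];
	return NULL;
}

static bool slot_ok(struct vma *v, int n, unsigned long hva,
		    unsigned long reg_end, unsigned long slot_flags)
{
	do {
		struct vma *vma = intersect(v, n, hva, reg_end);
		if (!vma)
			break;		/* no more backing: stop scanning */

		/* assumed rule: IO-remapped regions cannot be dirty-logged */
		if ((vma->vm_flags & VM_PFNMAP) && (slot_flags & LOG_DIRTY))
			return false;

		/* advance, as at 2061 */
		hva = reg_end < vma->vm_end ? reg_end : vma->vm_end;
	} while (hva < reg_end);
	return true;
}

int main(void)
{
	struct vma v[] = { { 0x1000, 0x4000, 0 }, { 0x4000, 0x9000, VM_PFNMAP } };

	printf("%d\n", slot_ok(v, 2, 0x1000, 0x8000, 0));	   /* 1 */
	printf("%d\n", slot_ok(v, 2, 0x1000, 0x8000, LOG_DIRTY)); /* 0 */
	return 0;
}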