Lines matching refs: from
516 int kvm_share_hyp(void *from, void *to)
530 if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
534 return create_hyp_mappings(from, to, PAGE_HYP);
536 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
548 void kvm_unshare_hyp(void *from, void *to)
553 if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
556 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
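Both the kvm_share_hyp() match at line 536 and the kvm_unshare_hyp() match at line 556 round the physical start of the range down to page granularity before walking it page by page. Below is a minimal userspace sketch of that arithmetic, not the kernel code itself: the values, the simplified ALIGN_DOWN/PAGE_ALIGN macros, and the assumption that the upper bound is rounded up the same way are all illustrative, and the per-page share/unshare call is reduced to a printf.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096ULL
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))                  /* round down to 'a' */
    #define PAGE_ALIGN(x)    (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        /* Pretend physical addresses of a buffer that straddles two pages. */
        uint64_t pa_from = 0x40001f00ULL;
        uint64_t pa_to   = 0x40002180ULL;

        uint64_t start = ALIGN_DOWN(pa_from, PAGE_SIZE);
        uint64_t end   = PAGE_ALIGN(pa_to);

        for (uint64_t cur = start; cur < end; cur += PAGE_SIZE)
            printf("share/unshare pfn 0x%llx\n",
                   (unsigned long long)(cur >> 12));
        return 0;
    }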
566 * @from: The virtual kernel start address of the range
574 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
578 unsigned long start = kern_hyp_va((unsigned long)from);
593 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
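The create_hyp_mappings() matches show the range being rebased into the hyp VA space (line 578) and, inside the loop, the backing kernel address being recovered by reapplying the offset from the start of the range (line 593). The sketch below only demonstrates that offset idiom; kern_to_hyp() and kaddr_to_phys() are made-up stand-ins for kern_hyp_va() and kvm_kaddr_to_phys(), and the addresses are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* Stand-ins for kern_hyp_va() and kvm_kaddr_to_phys(); offsets are made up. */
    static uint64_t kern_to_hyp(uint64_t kva)   { return kva & 0x0000ffffffffffffULL; }
    static uint64_t kaddr_to_phys(uint64_t kva) { return (kva & 0x0000ffffffffffffULL) + 0x40000000ULL; }

    int main(void)
    {
        uint64_t from  = 0xffff000012345000ULL;   /* kernel VA of the range start */
        uint64_t to    = from + 3 * PAGE_SIZE;
        uint64_t start = kern_to_hyp(from);
        uint64_t end   = kern_to_hyp(to);

        for (uint64_t virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
            /* Same idiom as line 593: offset back into the kernel range. */
            uint64_t phys_addr = kaddr_to_phys(from + (virt_addr - start));
            printf("map hyp VA 0x%llx -> PA 0x%llx\n",
                   (unsigned long long)virt_addr,
                   (unsigned long long)phys_addr);
        }
        return 0;
    }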
1315 * We can therefore safely transfer the refcount from PG_tail
1316 * to PG_head and switch the pfn from a tail page to the head
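The comment at lines 1315-1316 describes the transparent huge page adjustment: once the refcount is safely held on the head page, the faulting pfn can be rounded down from a tail page to the head page so the whole block can be mapped in one go. The illustration below shows only that rounding, assuming 4 KiB base pages and a 2 MiB block (512 pages); the constant name and the pfn value are made up, not taken from this file.

    #include <stdint.h>
    #include <stdio.h>

    /* 2 MiB block / 4 KiB base pages; an assumed geometry, not from this file. */
    #define PAGES_PER_BLOCK 512ULL

    int main(void)
    {
        uint64_t tail_pfn = 0x12345ULL;                          /* faulting pfn inside the THP */
        uint64_t head_pfn = tail_pfn & ~(PAGES_PER_BLOCK - 1);   /* round down to the head page */

        printf("tail pfn 0x%llx -> head pfn 0x%llx\n",
               (unsigned long long)tail_pfn,
               (unsigned long long)head_pfn);
        return 0;
    }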
1427 * and so normally don't require allocations from the memcache. The
1740 * complement it with the bottom 12 bits from the
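Line 1740 is a fragment of a comment about reconstructing the full fault address: the stage-2 fault IPA is reported at page granularity, so the bottom 12 bits are taken from the faulting address to recover the exact offset within the page. A sketch of that bit manipulation follows; the register values are invented and only the masking/OR step reflects what the comment describes.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Page-granular fault IPA and faulting address; both values are made up. */
        uint64_t fault_ipa_page = 0x8d000ULL << 12;
        uint64_t fault_va       = 0xffff800012345678ULL;

        /* Complement the page-granular IPA with the in-page offset (bottom 12 bits). */
        uint64_t fault_ipa = fault_ipa_page | (fault_va & ((1ULL << 12) - 1));

        printf("fault IPA = 0x%llx\n", (unsigned long long)fault_ipa);
        return 0;
    }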
1998 * Free any leftovers from the eager page splitting cache. Do
2021 * Prevent userspace from creating a memory region outside of the IPA
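Line 2021 begins a comment about bounding memslot registration: a region's last guest frame must still fall inside the guest's IPA space. The sketch below shows such a check under stated assumptions only: the 40-bit IPA size and the -E2BIG error code are illustrative, and check_region() is a hypothetical helper, not the kernel's function.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define IPA_SIZE   (1ULL << 40)   /* assumed guest IPA space size */

    /* Reject a region whose last page would fall beyond the IPA space. */
    static int check_region(uint64_t base_gfn, uint64_t npages)
    {
        if (base_gfn + npages > (IPA_SIZE >> PAGE_SHIFT))
            return -E2BIG;            /* illustrative error code */
        return 0;
    }

    int main(void)
    {
        printf("fits:    %d\n", check_region(0x100000ULL, 0x1000ULL));
        printf("too big: %d\n", check_region((IPA_SIZE >> PAGE_SHIFT) - 1, 2ULL));
        return 0;
    }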