Lines Matching refs:size

35 					   phys_addr_t size)
37 phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
44 phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
46 return __stage2_range_addr_end(addr, end, size);
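
The matches at lines 35-46 are the stage-2 range-walk helpers: __stage2_range_addr_end() aligns addr + size down to the block size to find the next boundary, and the caller at line 46 feeds it kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL). A minimal userspace sketch of that boundary arithmetic, assuming a power-of-two block size; the local range_addr_end() name, the ALIGN_DOWN definition and the wrap-safe clamp to end are reconstructions from context, with uint64_t standing in for phys_addr_t:

#include <inttypes.h>
#include <stdio.h>

/* Power-of-two stand-in for the kernel's ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

/*
 * Next block-aligned boundary after addr, clamped to end (the -1 on both
 * sides keeps the comparison safe if addr + size wraps around).
 */
static uint64_t range_addr_end(uint64_t addr, uint64_t end, uint64_t size)
{
	uint64_t boundary = ALIGN_DOWN(addr + size, size);

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0x1200, end = 0x5000, block = 0x1000;

	/* Walk [0x1200, 0x5000) one block-aligned chunk at a time. */
	while (addr < end) {
		uint64_t next = range_addr_end(addr, end, block);

		printf("chunk: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", addr, next);
		addr = next;
	}
	return 0;
}
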
200 static void *kvm_host_zalloc_pages_exact(size_t size)
202 return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
205 static void *kvm_s2_zalloc_pages_exact(size_t size)
207 void *virt = kvm_host_zalloc_pages_exact(size);
210 kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
214 static void kvm_s2_free_pages_exact(void *virt, size_t size)
216 kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
217 free_pages_exact(virt, size);
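
Lines 200-217 pair an exact-size zeroed allocation with page-table page accounting: the stage-2 allocator adds size >> PAGE_SHIFT pages on allocation and subtracts the same count before freeing. A userspace analogue of that pattern, not the kernel code itself (the counter, the local helper names and the NULL check are assumptions; the kernel side uses alloc_pages_exact()/free_pages_exact() and kvm_account_pgtable_pages() as shown above):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Stand-in for the page-table page counter the kernel maintains. */
static long pgtable_pages;

static void account_pgtable_pages(void *virt, long nr)
{
	(void)virt;
	pgtable_pages += nr;
}

/* Mirrors kvm_s2_zalloc_pages_exact(): zeroed allocation, then account. */
static void *s2_zalloc_pages_exact(size_t size)
{
	void *virt = calloc(1, size);

	if (virt)
		account_pgtable_pages(virt, size >> PAGE_SHIFT);
	return virt;
}

/* Mirrors kvm_s2_free_pages_exact(): un-account, then free. */
static void s2_free_pages_exact(void *virt, size_t size)
{
	account_pgtable_pages(virt, -(long)(size >> PAGE_SHIFT));
	free(virt);
}

int main(void)
{
	void *p = s2_zalloc_pages_exact(4 * PAGE_SIZE);

	printf("accounted pages after alloc: %ld\n", pgtable_pages);
	s2_free_pages_exact(p, 4 * PAGE_SIZE);
	printf("accounted pages after free:  %ld\n", pgtable_pages);
	return 0;
}
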
273 static void clean_dcache_guest_page(void *va, size_t size)
275 __clean_dcache_guest_page(va, size);
278 static void invalidate_icache_guest_page(void *va, size_t size)
280 __invalidate_icache_guest_page(va, size);
311 * @size: The size of the area to unmap
319 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
323 phys_addr_t end = start + size;
326 WARN_ON(size & ~PAGE_MASK);
331 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
333 __unmap_stage2_range(mmu, start, size, true);
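
Lines 311-333 are the stage-2 unmap path: unmap_stage2_range() is a thin wrapper that calls __unmap_stage2_range() with what context suggests is a may_block flag set to true, and the WARN_ON at line 326 insists that size is a whole number of pages. The check works because ~PAGE_MASK keeps only the offset-within-page bits; a small standalone sketch of that test (the PAGE_SHIFT value and the unaligned() helper are local assumptions):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Nonzero iff size is not page-aligned, as tested at line 326. */
static int unaligned(uint64_t size)
{
	return (size & ~PAGE_MASK) != 0;
}

int main(void)
{
	printf("0x2000: %s\n", unaligned(0x2000) ? "unaligned" : "page-aligned");
	printf("0x2200: %s\n", unaligned(0x2200) ? "unaligned" : "page-aligned");
	return 0;
}
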
404 int __create_hyp_mappings(unsigned long start, unsigned long size,
413 err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
625 * @size: The size of the VA range to reserve.
629 * and aligned based on the order of @size.
633 int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
647 * The allocated size is always a multiple of PAGE_SIZE.
649 size = PAGE_ALIGN(size);
650 base = io_map_base - size;
661 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
670 phys_addr, size, prot);
678 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
679 ret = hyp_alloc_private_va_range(size, &addr);
683 ret = __create_hyp_mappings(addr, size, phys_addr, prot);
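
Line 678 shows how __create_hyp_private_mapping() widens the requested size before carving out private hyp VA: the byte offset of phys_addr within its page is added and the total is rounded up to whole pages, so a physical range that straddles a page boundary still gets enough VA. The same rounding in a standalone sketch; the PAGE_SIZE value, the example addresses and the local PAGE_ALIGN()/offset_in_page() definitions are stand-ins for the kernel macros:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~((uint64_t)PAGE_SIZE - 1))
#define offset_in_page(p)	((uint64_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
	uint64_t phys_addr = 0x40000f80;	/* starts near the end of a page */
	uint64_t size = 0x100;			/* 256 bytes, spills into the next page */

	/* Line 678: grow size by the in-page offset, then round up to pages. */
	uint64_t va_size = PAGE_ALIGN(size + offset_in_page(phys_addr));

	printf("mapping 0x%" PRIx64 " bytes at 0x%" PRIx64 " needs 0x%" PRIx64
	       " bytes of VA (%" PRIu64 " pages)\n",
	       size, phys_addr, va_size, va_size / PAGE_SIZE);
	return 0;
}
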
694 size_t size;
700 * an alignment of our allocation on the order of the size.
702 size = PAGE_SIZE * 2;
703 base = ALIGN_DOWN(io_map_base - size, size);
728 *haddr = base + size;
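
Lines 694-728 belong to the hyp stack setup: two pages of VA are taken below io_map_base, the carve-out is aligned to its own size (that is what the comment at line 700 about "alignment ... on the order of the size" refers to), and the returned base + size is the top of the stack. Context suggests only the upper page is mapped, leaving the lower page as an unmapped guard; that split is inferred rather than visible in the matches. The arithmetic in a small standalone sketch, with a hypothetical io_map_base value:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t io_map_base = 0xffff800000b34567;	/* hypothetical current base */
	uint64_t size = PAGE_SIZE * 2;			/* guard page + stack page */

	/* Lines 702-703: allocate downward, aligned to the two-page size. */
	uint64_t base = ALIGN_DOWN(io_map_base - size, size);
	uint64_t haddr = base + size;			/* line 728: top of the stack */

	printf("guard page: [0x%" PRIx64 ", 0x%" PRIx64 ") (left unmapped)\n",
	       base, base + PAGE_SIZE);
	printf("stack page: [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
	       base + PAGE_SIZE, haddr);
	printf("stack top (*haddr): 0x%" PRIx64 "\n", haddr);
	return 0;
}
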
736 * @size: Size of the region being mapped
740 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
750 *kaddr = ioremap(phys_addr, size);
759 ret = __create_hyp_private_mapping(phys_addr, size,
775 * @size: Size of the region being mapped
778 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
786 ret = __create_hyp_private_mapping(phys_addr, size,
946 phys_addr_t size = PAGE_SIZE * memslot->npages;
947 hva_t reg_end = hva + size;
1061 * @size: The size of the mapping
1065 phys_addr_t pa, unsigned long size, bool writable)
1078 size += offset_in_page(guest_ipa);
1081 for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
1217 size_t size;
1223 size = memslot->npages * PAGE_SIZE;
1228 uaddr_end = uaddr_start + size;
1258 * by the memslot. This means we have to prohibit block size mappings
1278 * Returns the size of the mapping.
1373 unsigned long size)
1375 unsigned long i, nr_pages = size >> PAGE_SHIFT;
1587 * kvm_pgtable_stage2_map() should be called to change block size.
1742 * of the page size.
1819 u64 size = (range->end - range->start) << PAGE_SHIFT;
1826 size, true);
1831 u64 size = (range->end - range->start) << PAGE_SHIFT;
1838 size, false);
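
Lines 1819-1838 convert an mmu-notifier gfn range into a byte-sized stage-2 range before handing it to __unmap_stage2_range(): the page-frame count (range->end - range->start) is shifted left by PAGE_SHIFT to get a size in bytes, and (not visible in the matches) the start gfn is shifted the same way to get the base address. The same conversion in a standalone sketch with hypothetical gfn values:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	/* Hypothetical gfn range handed in by an mmu-notifier callback. */
	uint64_t start_gfn = 0x8804a;
	uint64_t end_gfn   = 0x88062;	/* exclusive */

	uint64_t base = start_gfn << PAGE_SHIFT;		/* address of the first page */
	uint64_t size = (end_gfn - start_gfn) << PAGE_SHIFT;	/* lines 1819/1831 */

	printf("unmap [0x%" PRIx64 ", 0x%" PRIx64 "): 0x%" PRIx64 " bytes (%" PRIu64 " pages)\n",
	       base, base + size, size, end_gfn - start_gfn);
	return 0;
}
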
1853 unsigned long size = hyp_idmap_end - hyp_idmap_start;
1854 int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
1897 * currently configured page size and VA_BITS_MIN, in which case we will
2085 phys_addr_t size = slot->npages << PAGE_SHIFT;
2088 unmap_stage2_range(&kvm->arch.mmu, gpa, size);