Lines Matching refs:addr

47 	unsigned long addr = (unsigned long)x;
49 return addr >= VMALLOC_START && addr < VMALLOC_END;
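
The two matches above (lines 47-49) are the is_vmalloc_addr()-style range test: an address counts as vmalloc space iff it falls inside the half-open interval [VMALLOC_START, VMALLOC_END). A minimal user-space sketch of the same test, with illustrative constants standing in for the arch-specific VMALLOC_START/VMALLOC_END values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative placeholders; the real values are per-architecture. */
#define VMALLOC_START 0xffffc90000000000UL
#define VMALLOC_END   0xffffe90000000000UL

static bool in_vmalloc_range(const void *x)
{
        unsigned long addr = (unsigned long)x;

        /* Half-open interval: VMALLOC_END itself is not a vmalloc address. */
        return addr >= VMALLOC_START && addr < VMALLOC_END;
}

int main(void)
{
        printf("%d\n", in_vmalloc_range((void *)(VMALLOC_START + 0x1000)));
        return 0;
}
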
72 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
77 pte = pte_offset_kernel(pmd, addr);
79 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
81 } while (pte++, addr += PAGE_SIZE, addr != end);
85 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
92 pmd = pmd_offset(pud, addr);
94 next = pmd_addr_end(addr, end);
104 vunmap_pte_range(pmd, addr, next, mask);
107 } while (pmd++, addr = next, addr != end);
110 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
117 pud = pud_offset(p4d, addr);
119 next = pud_addr_end(addr, end);
129 vunmap_pmd_range(pud, addr, next, mask);
130 } while (pud++, addr = next, addr != end);
133 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
140 p4d = p4d_offset(pgd, addr);
142 next = p4d_addr_end(addr, end);
152 vunmap_pud_range(p4d, addr, next, mask);
153 } while (p4d++, addr = next, addr != end);
161 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify
174 unsigned long addr = start;
177 BUG_ON(addr >= end);
178 pgd = pgd_offset_k(addr);
180 next = pgd_addr_end(addr, end);
185 vunmap_p4d_range(pgd, addr, next, &mask);
186 } while (pgd++, addr = next, addr != end);
192 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
203 pte = pte_alloc_kernel_track(pmd, addr, mask);
213 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
215 } while (pte++, addr += PAGE_SIZE, addr != end);
220 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
227 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
231 next = pmd_addr_end(addr, end);
232 if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
234 } while (pmd++, addr = next, addr != end);
238 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
245 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
249 next = pud_addr_end(addr, end);
250 if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
252 } while (pud++, addr = next, addr != end);
256 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
263 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
267 next = p4d_addr_end(addr, end);
268 if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
270 } while (p4d++, addr = next, addr != end);
276 * @addr: start of the VM area to map
281 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify should
292 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
295 unsigned long start = addr;
296 unsigned long end = addr + size;
303 BUG_ON(addr >= end);
304 pgd = pgd_offset_k(addr);
306 next = pgd_addr_end(addr, end);
309 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
312 } while (pgd++, addr = next, addr != end);
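
Every walk in the two groups above, vunmap_*_range (lines 72-186) and vmap_*_range (lines 192-312), follows one range-splitting idiom: each level clamps its sub-range at the next boundary of that level's size via *_addr_end() (or steps by PAGE_SIZE at the PTE leaf) and advances with addr = next until addr == end; the vmap side additionally allocates missing levels with *_alloc_track(). A stand-alone sketch of that idiom with an assumed two-level walk and an illustrative 2 MiB PMD_SIZE, not the kernel's page-table types:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PMD_SIZE  (512UL * PAGE_SIZE)   /* assumed 2 MiB, as on x86-64 */

/* Clamp the current chunk at the next PMD-sized boundary or at 'end',
 * whichever comes first (no address-space wrap assumed in this sketch). */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & ~(PMD_SIZE - 1);

        return boundary < end ? boundary : end;
}

static void walk_pte_level(unsigned long addr, unsigned long end)
{
        /* One entry per page at the leaf level. */
        do {
                /* clear or install the PTE covering 'addr' here */
        } while (addr += PAGE_SIZE, addr != end);
}

static void walk_pmd_level(unsigned long addr, unsigned long end)
{
        unsigned long next;

        do {
                next = pmd_addr_end(addr, end);
                printf("pmd chunk %#lx-%#lx\n", addr, next);
                walk_pte_level(addr, next);
        } while (addr = next, addr != end);
}

int main(void)
{
        /* An unaligned start exercises the clamping on the first chunk. */
        walk_pmd_level(0x1000, 3 * PMD_SIZE);
        return 0;
}
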
338 unsigned long addr = (unsigned long)x;
339 if (addr >= MODULES_VADDR && addr < MODULES_END)
350 unsigned long addr = (unsigned long) vmalloc_addr;
352 pgd_t *pgd = pgd_offset_k(addr);
366 p4d = p4d_offset(pgd, addr);
369 pud = pud_offset(p4d, addr);
382 pmd = pmd_offset(pud, addr);
387 ptep = pte_offset_map(pmd, addr);
493 static struct vmap_area *__find_vmap_area(unsigned long addr)
501 if (addr < va->va_start)
503 else if (addr >= va->va_end)
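
__find_vmap_area() (lines 493-503) descends the vmap_area rbtree by interval: go left while addr < va_start, go right while addr >= va_end, and stop when addr lands inside [va_start, va_end). A hedged sketch of the same decision over a plain sorted array instead of the kernel's rbtree:

#include <stdio.h>

struct area {
        unsigned long va_start;
        unsigned long va_end;   /* exclusive */
};

/* Binary search over sorted, non-overlapping areas; mirrors the
 * left/right/hit decision __find_vmap_area() makes on the rbtree. */
static const struct area *find_area(const struct area *areas, int n,
                                    unsigned long addr)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (addr < areas[mid].va_start)
                        hi = mid - 1;           /* descend left  */
                else if (addr >= areas[mid].va_end)
                        lo = mid + 1;           /* descend right */
                else
                        return &areas[mid];     /* addr inside [va_start, va_end) */
        }
        return NULL;
}

int main(void)
{
        static const struct area areas[] = {
                { 0x1000, 0x3000 }, { 0x5000, 0x6000 }, { 0x9000, 0xc000 },
        };
        const struct area *a = find_area(areas, 3, 0x5800);

        printf("%s\n", a ? "found" : "not found");
        return 0;
}
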
1155 unsigned long addr;
1214 addr = __alloc_vmap_area(size, align, vstart, vend);
1217 if (unlikely(addr == vend))
1220 va->va_start = addr;
1221 va->va_end = addr + size;
1233 ret = kasan_populate_vmalloc(addr, size);
1442 static struct vmap_area *find_vmap_area(unsigned long addr)
1447 va = __find_vmap_area(addr);
1515 static unsigned long addr_to_vb_idx(unsigned long addr)
1517 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1518 addr /= VMAP_BLOCK_SIZE;
1519 return addr;
1524 unsigned long addr;
1526 addr = va_start + (pages_off << PAGE_SHIFT);
1527 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1528 return (void *)addr;
1698 static void vb_free(unsigned long addr, unsigned long size)
1707 flush_cache_vunmap(addr, addr + size);
1710 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1711 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
1713 unmap_kernel_range_noflush(addr, size);
1716 flush_tlb_kernel_range(addr, addr + size);
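
The vmap-block matches (lines 1515-1716) are mostly address arithmetic: addr_to_vb_idx() converts an address into the xarray index of its vmap block, and vb_free() recovers the page offset inside that block by masking with VMAP_BLOCK_SIZE - 1. A small sketch with VMALLOC_START and VMAP_BLOCK_SIZE assumed as illustrative constants:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define VMALLOC_START   0xffffc90000000000UL    /* illustrative */
#define VMAP_BLOCK_SIZE (64UL * PAGE_SIZE)      /* assumed block size */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
        /* Offset from the first block-aligned address at or below
         * VMALLOC_START, divided by the block size. */
        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE - 1);
        return addr / VMAP_BLOCK_SIZE;
}

int main(void)
{
        unsigned long addr = VMALLOC_START + 5 * VMAP_BLOCK_SIZE + 3 * PAGE_SIZE;

        /* Index used to look the block up (xa_load() in vb_free()). */
        printf("block index: %lu\n", addr_to_vb_idx(addr));

        /* Page offset within the block, as computed by vb_free(). */
        printf("page offset: %lu\n",
               (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT);
        return 0;
}
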
1803 unsigned long addr = (unsigned long)mem;
1807 BUG_ON(!addr);
1808 BUG_ON(addr < VMALLOC_START);
1809 BUG_ON(addr > VMALLOC_END);
1810 BUG_ON(!PAGE_ALIGNED(addr));
1816 vb_free(addr, size);
1820 va = find_vmap_area(addr);
1845 unsigned long addr;
1852 addr = (unsigned long)mem;
1860 addr = va->va_start;
1861 mem = (void *)addr;
1866 if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
1881 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1892 if (tmp->addr >= vm->addr) {
1893 BUG_ON(tmp->addr < vm->addr + vm->size);
1896 BUG_ON(tmp->addr + tmp->size > vm->addr);
1910 * vm->addr contains the allocated address.
1917 unsigned long addr;
1919 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1920 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1922 vm->addr = (void *)addr;
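
vm_area_register_early() (lines 1917-1922) is a bump allocator: align the running offset into the vmalloc range, hand out the area, then round the offset up to the next page for the following caller. A sketch of that arithmetic; vm_init_off, the sizes, and VMALLOC_START below are illustrative:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define VMALLOC_START   0xffffc90000000000UL    /* illustrative */

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

static unsigned long vm_init_off;       /* running offset into the vmalloc area */

/* Bump-allocate 'size' bytes at 'align', like vm_area_register_early(). */
static unsigned long register_early(unsigned long size, unsigned long align)
{
        unsigned long addr = ALIGN_UP(VMALLOC_START + vm_init_off, align);

        /* The next allocation starts at the page after this area. */
        vm_init_off = ALIGN_UP(addr + size, PAGE_SIZE) - VMALLOC_START;
        return addr;
}

int main(void)
{
        printf("first:  %#lx\n", register_early(3 * PAGE_SIZE + 100, PAGE_SIZE));
        printf("second: %#lx\n", register_early(PAGE_SIZE, 2 * PAGE_SIZE));
        return 0;
}
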
1997 va->va_start = (unsigned long)tmp->addr;
2012 * @addr: start of the VM area to unmap
2018 void unmap_kernel_range(unsigned long addr, unsigned long size)
2020 unsigned long end = addr + size;
2022 flush_cache_vunmap(addr, end);
2023 unmap_kernel_range_noflush(addr, size);
2024 flush_tlb_kernel_range(addr, end);
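
unmap_kernel_range() (lines 2018-2024) brackets the noflush teardown with the required maintenance: flush the cache for the virtual range before the page tables are cleared, and flush the TLB afterwards so no stale translations survive. A stub-based sketch of that ordering; the helpers below are placeholders, not the kernel functions:

#include <stdio.h>

/* Placeholder stubs standing in for the real cache/TLB/page-table helpers. */
static void flush_cache_for_range(unsigned long start, unsigned long end)
{
        printf("1. flush cache       %#lx-%#lx\n", start, end);
}

static void unmap_range_noflush(unsigned long addr, unsigned long size)
{
        printf("2. clear page tables %#lx (+%lu)\n", addr, size);
}

static void flush_tlb_for_range(unsigned long start, unsigned long end)
{
        printf("3. flush TLB         %#lx-%#lx\n", start, end);
}

static void unmap_range(unsigned long addr, unsigned long size)
{
        unsigned long end = addr + size;

        flush_cache_for_range(addr, end);       /* before the teardown */
        unmap_range_noflush(addr, size);        /* the actual PTE clearing */
        flush_tlb_for_range(addr, end);         /* after, so no stale entries */
}

int main(void)
{
        unmap_range(0xffffc90000001000UL, 2 * 4096UL);
        return 0;
}
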
2031 vm->addr = (void *)va->va_start;
2128 * @addr: base address
2130 * Search for the kernel VM area starting at @addr, and return it.
2136 struct vm_struct *find_vm_area(const void *addr)
2140 va = find_vmap_area((unsigned long)addr);
2149 * @addr: base address
2151 * Search for the kernel VM area starting at @addr, and remove it.
2157 struct vm_struct *remove_vm_area(const void *addr)
2164 va = __find_vmap_area((unsigned long)addr);
2199 remove_vm_area(area->addr);
2220 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2221 if (addr) {
2222 start = min(addr, start);
2223 end = max(addr + PAGE_SIZE, end);
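
Lines 2220-2223 accumulate the smallest [start, end) span covering every mapped page so a single flush call can cover them all: start only shrinks toward the lowest page address, end only grows past the highest. A sketch of that accumulation over hypothetical page addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Hypothetical page addresses; 0 stands for an unmapped page. */
        unsigned long pages[] = { 0x7000, 0x3000, 0, 0x9000 };
        unsigned long start = ~0UL, end = 0;
        int i;

        for (i = 0; i < 4; i++) {
                unsigned long addr = pages[i];

                if (addr) {
                        start = MIN(addr, start);
                        end = MAX(addr + PAGE_SIZE, end);
                }
        }
        /* One range covering every mapped page: [0x3000, 0xa000) */
        printf("flush %#lx-%#lx\n", start, end);
        return 0;
}
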
2238 static void __vunmap(const void *addr, int deallocate_pages)
2242 if (!addr)
2245 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2246 addr))
2249 area = find_vm_area(addr);
2252 addr);
2256 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2257 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2259 kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2281 static inline void __vfree_deferred(const void *addr)
2291 if (llist_add((struct llist_node *)addr, &p->list))
2297 * @addr: memory base address
2302 void vfree_atomic(const void *addr)
2306 kmemleak_free(addr);
2308 if (!addr)
2310 __vfree_deferred(addr);
2313 static void __vfree(const void *addr)
2316 __vfree_deferred(addr);
2318 __vunmap(addr, 1);
2323 * @addr: Memory base address
2325 * Free the virtually contiguous memory area starting at @addr, as obtained
2330 * If @addr is NULL, no operation is performed.
2338 void vfree(const void *addr)
2342 kmemleak_free(addr);
2346 if (!addr)
2349 __vfree(addr);
2355 * @addr: memory base address
2357 * Free the virtually contiguous memory area starting at @addr,
2362 void vunmap(const void *addr)
2366 if (addr)
2367 __vunmap(addr, 0);
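
The llist_add((struct llist_node *)addr, ...) on line 2291 deserves a note: the deferred-free path reuses the first words of the block being freed as the list node itself, so nothing has to be allocated while in atomic context. A user-space sketch of that pointer-reuse trick; the node type and drain() below are stand-ins, not the kernel's llist API:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

static struct node *deferred;   /* lock-free llist in the kernel; plain here */

/* Defer freeing 'mem' by using its first bytes as the list node. */
static void free_deferred(void *mem)
{
        struct node *n = mem;

        n->next = deferred;
        deferred = n;
}

static void drain(void)
{
        while (deferred) {
                struct node *n = deferred;

                deferred = n->next;
                free(n);        /* the real work happens here, later */
        }
}

int main(void)
{
        free_deferred(malloc(128));
        free_deferred(malloc(256));
        drain();
        printf("drained\n");
        return 0;
}
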
2402 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2404 vunmap(area->addr);
2412 return area->addr;
2423 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2451 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2457 flush_cache_vmap((unsigned long)area->addr,
2458 (unsigned long)area->addr + count * PAGE_SIZE);
2460 return area->addr;
2488 remove_vm_area(area->addr);
2516 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2520 return area->addr;
2526 __vfree(area->addr);
2554 void *addr;
2566 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2567 if (!addr)
2579 return addr;
2770 * small helper routine, copy contents to buf from addr.
2774 static int aligned_vread(char *buf, char *addr, unsigned long count)
2782 offset = offset_in_page(addr);
2786 p = vmalloc_to_page(addr);
2805 addr += length;
2813 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2821 offset = offset_in_page(addr);
2825 p = vmalloc_to_page(addr);
2842 addr += length;
2853 * @addr: vm address.
2856 * This function checks that addr is a valid vmalloc'ed area, and
2858 * of [addr...addr+count) includes some valid address, data is copied to
2862 * If [addr...addr+count) doesn't include any intersection with a live
2870 * Return: number of bytes for which addr and buf should be increased
2871 * (same number as @count) or %0 if [addr...addr+count) doesn't
2874 long vread(char *buf, char *addr, unsigned long count)
2883 if ((unsigned long) addr + count < count)
2884 count = -(unsigned long) addr;
2895 vaddr = (char *) vm->addr;
2896 if (addr >= vaddr + get_vm_area_size(vm))
2898 while (addr < vaddr) {
2903 addr++;
2906 n = vaddr + get_vm_area_size(vm) - addr;
2910 aligned_vread(buf, addr, n);
2914 addr += n;
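
vread() (lines 2874-2914) starts by guarding against addr + count wrapping past the top of the address space (lines 2883-2884), then zero-fills any gap before each vm area and copies the rest with aligned_vread(). A small sketch of just the wrap-around clamp, with illustrative numbers:

#include <stdio.h>

int main(void)
{
        /* 100 addressable bytes remain below the top of the address space. */
        unsigned long addr = ~0UL - 100 + 1;
        unsigned long count = 4096;

        /* If addr + count wraps past ~0UL, shrink count so the walk
         * stops at the last representable byte (same test as vread()). */
        if (addr + count < count)
                count = -addr;

        printf("clamped count: %lu\n", count);  /* prints 100 */
        return 0;
}
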
2932 * @addr: vm address.
2935 * This function checks that addr is a valid vmalloc'ed area, and
2936 * copy data from a buffer to the given addr. If specified range of
2937 * [addr...addr+count) includes some valid address, data is copied from
2941 * If [addr...addr+count) doesn't include any intersection with a live
2949 * Return: number of bytes for which addr and buf should be
2950 * increased (same number as @count) or %0 if [addr...addr+count)
2953 long vwrite(char *buf, char *addr, unsigned long count)
2962 if ((unsigned long) addr + count < count)
2963 count = -(unsigned long) addr;
2975 vaddr = (char *) vm->addr;
2976 if (addr >= vaddr + get_vm_area_size(vm))
2978 while (addr < vaddr) {
2982 addr++;
2985 n = vaddr + get_vm_area_size(vm) - addr;
2989 aligned_vwrite(buf, addr, n);
2993 addr += n;
3070 * @addr: vmalloc memory
3071 * @pgoff: number of pages into addr before first page to map
3075 * This function checks that addr is a valid vmalloc'ed area, and
3081 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3085 addr, pgoff,
3093 ret = remove_vm_area(area->addr);
3106 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3107 * @addr: target address
3111 * i.e. va->va_start < addr && va->va_end < addr or NULL
3112 * if there are no areas before @addr.
3115 pvm_find_va_enclose_addr(unsigned long addr)
3125 if (tmp->va_start <= addr) {
3127 if (tmp->va_end >= addr)
3152 unsigned long addr;
3157 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3158 if ((*va)->va_start < addr)
3159 return addr;
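
Line 3157 chooses the highest usable end address by rounding (*va)->va_end down to the requested alignment (assumed to be a power of two, so & ~(align - 1) is a round-down) and clamping at vmalloc_end. A tiny sketch with illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned long va_end = 0xffffe8ffffff5000UL;    /* illustrative */
        unsigned long vmalloc_end = 0xffffe90000000000UL;
        unsigned long align = 1UL << 21;                /* 2 MiB, power of two */
        unsigned long addr;

        /* Round va_end down to 'align', then clamp at vmalloc_end. */
        addr = va_end & ~(align - 1);
        if (addr > vmalloc_end)
                addr = vmalloc_end;

        printf("candidate end: %#lx\n", addr);
        return 0;
}
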
3528 v->addr, v->addr + v->size, v->size);