Lines matching refs: area
157 * unmap_kernel_range_noflush - unmap kernel VM area
158 * @start: start of the VM area to unmap
159 * @size: size of the VM area to unmap
161 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify
275 * map_kernel_range_noflush - map kernel VM area with the specified pages
276 * @addr: start of the VM area to map
277 * @size: size of the VM area to map
281 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify should
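The two *_noflush primitives above map and unmap pre-allocated pages without any cache or TLB maintenance. A minimal sketch of how a caller might pair them, assuming a previously reserved area and a hypothetical page array, with the flushes the caller is expected to supply itself:

/* hypothetical helper: map, use and unmap nr_pages pre-allocated pages */
static int with_temp_mapping(struct vm_struct *area, struct page **pages,
			     unsigned int nr_pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;

	if (map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages) < 0)
		return -ENOMEM;
	flush_cache_vmap(addr, addr + size);		/* caller-side flush */

	/* ... use the mapping ... */

	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* caller-side flush */
	return 0;
}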
442 * find the lowest match of a free area.
734 * free area is inserted. If VA has been merged, it is
782 /* Point to the new merged area. */
813 /* Point to the new merged area. */
1083 * Returns a start address of the newly allocated area, if success.
1182 * when fit type of free area is NE_FIT_TYPE. Please note, it
1360 * Finally insert or merge lazily-freed area. It is
1407 * Free a vmap area, caller ensuring that the area has been unmapped
1430 * Free and unmap a vmap area
1877 * vm_area_add_early - add vmap area early during boot
1880 * This function is used to add a fixed kernel vm area to vmlist before
1903 * vm_area_register_early - register vmap area early during boot
1907 * This function is used to register a kernel vm area before
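Both early-boot helpers take a caller-provided, statically allocated struct vm_struct, since nothing can be allocated before vmalloc_init(). A hedged sketch of the vm_area_register_early() flavour, modelled on the percpu first-chunk setup; the size and alignment are illustrative:

static struct vm_struct early_vm;			/* no kmalloc this early */

static void __init reserve_early_area(void)
{
	early_vm.flags = VM_ALLOC;
	early_vm.size  = SZ_2M;				/* assumed size */
	vm_area_register_early(&early_vm, PAGE_SIZE);
	/* early_vm.addr now holds the chosen kernel virtual address */
}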
2011 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2012 * @addr: start of the VM area to unmap
2013 * @size: size of the VM area to unmap
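Unlike the *_noflush variant earlier in the file, unmap_kernel_range() performs the cache and TLB flushes itself, so tearing down a mapping is a single call (hypothetical vaddr and nr_pages):

	unmap_kernel_range((unsigned long)vaddr, nr_pages << PAGE_SHIFT);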
2061 struct vm_struct *area;
2073 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2074 if (unlikely(!area))
2082 kfree(area);
2088 setup_vmalloc_vm(area, va, flags, caller);
2090 return area;
2102 * get_vm_area - reserve a contiguous kernel virtual area
2103 * @size: size of the area
2106 * Search an area of @size in the kernel virtual mapping area,
2107 * and reserve it for our purposes. Returns the area descriptor
2110 * Return: the area descriptor on success or %NULL on failure.
2127 * find_vm_area - find a contiguous kernel virtual area
2130 * Search for the kernel VM area starting at @addr, and return it.
2134 * Return: the area descriptor on success or %NULL on failure.
2148 * remove_vm_area - find and remove a contiguous kernel virtual area
2151 * Search for the kernel VM area starting at @addr, and remove it.
2152 * This function returns the found VM area, but using it is NOT safe
2155 * Return: the area descriptor on success or %NULL on failure.
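A hypothetical lifecycle sketch tying get_vm_area(), find_vm_area() and remove_vm_area() together; remove_vm_area() only detaches the descriptor, so free_vm_area() (matched further down in this listing) is used here as the remove-and-kfree wrapper. Size and flags are illustrative:

static int vm_area_lifecycle_demo(void)
{
	struct vm_struct *vm, *found;

	vm = get_vm_area(SZ_1M, VM_IOREMAP);	/* size/flags are illustrative */
	if (!vm)
		return -ENOMEM;

	found = find_vm_area(vm->addr);		/* returns the same descriptor */
	WARN_ON(found != vm);

	free_vm_area(vm);			/* remove_vm_area() + kfree() */
	return 0;
}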
2181 static inline void set_area_direct_map(const struct vm_struct *area,
2186 for (i = 0; i < area->nr_pages; i++)
2187 if (page_address(area->pages[i]))
2188 set_direct_map(area->pages[i]);
2192 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2195 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2199 remove_vm_area(area->addr);
2206 * If not deallocating pages, just do the flush of the VM area and
2219 for (i = 0; i < area->nr_pages; i++) {
2220 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2233 set_area_direct_map(area, set_direct_map_invalid_noflush);
2235 set_area_direct_map(area, set_direct_map_default_noflush);
2240 struct vm_struct *area;
2249 area = find_vm_area(addr);
2250 if (unlikely(!area)) {
2251 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2256 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2257 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2259 kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2261 vm_remove_mappings(area, deallocate_pages);
2266 for (i = 0; i < area->nr_pages; i++) {
2267 struct page *page = area->pages[i];
2272 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2274 kvfree(area->pages);
2277 kfree(area);
2325 * Free the virtually contiguous memory area starting at @addr, as obtained
2357 * Free the virtually contiguous memory area starting at @addr,
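The vfree() comment above refers to buffers obtained from vmalloc() and friends; a minimal sketch of that pairing (the vunmap() counterpart appears after the vmap() matches below), with an illustrative size:

	void *buf = vmalloc(16 * PAGE_SIZE);	/* size is illustrative */

	if (!buf)
		return -ENOMEM;
	/* ... use the virtually contiguous, physically scattered buffer ... */
	vfree(buf);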
2384 * Return: the address of the area or %NULL on failure
2389 struct vm_struct *area;
2398 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2399 if (!area)
2402 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2404 vunmap(area->addr);
2409 area->pages = pages;
2410 area->nr_pages = count;
2412 return area->addr;
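Caller-side sketch of the vmap() path whose internals are matched above: an already-populated page array is given one contiguous kernel mapping and later dropped with vunmap(). The pages array and nr are assumed to have been filled in elsewhere (e.g. by alloc_page() in a loop):

	void *vaddr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

	if (!vaddr)
		return -ENOMEM;
	/* ... access all nr pages through the contiguous vaddr ... */
	vunmap(vaddr);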
2445 struct vm_struct *area;
2447 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2449 if (!area)
2451 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2453 free_vm_area(area);
2457 flush_cache_vmap((unsigned long)area->addr,
2458 (unsigned long)area->addr + count * PAGE_SIZE);
2460 return area->addr;
2465 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2469 unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2482 area->caller);
2488 remove_vm_area(area->addr);
2489 kfree(area);
2493 area->pages = pages;
2494 area->nr_pages = nr_pages;
2496 for (i = 0; i < area->nr_pages; i++) {
2506 area->nr_pages = i;
2507 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2510 area->pages[i] = page;
2514 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2516 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2520 return area->addr;
2525 (area->nr_pages*PAGE_SIZE), area->size);
2526 __vfree(area->addr);
2534 * @start: vm area range start
2535 * @end: vm area range end
2538 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2546 * Return: the address of the area or %NULL on failure
2553 struct vm_struct *area;
2561 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2563 if (!area)
2566 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2575 clear_vm_uninitialized_flag(area);
2577 kmemleak_vmalloc(area, size, gfp_mask);
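A hedged sketch of a direct __vmalloc_node_range() call of the kind arch code (module loaders, for instance) uses when the allocation must land in a specific virtual range; every parameter below is illustrative rather than taken from this file:

	void *p = __vmalloc_node_range(size, PAGE_SIZE,
				       VMALLOC_START, VMALLOC_END,
				       GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD,
				       NUMA_NO_NODE, __builtin_return_address(0));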
2671 * The resulting memory area is zeroed so it can be mapped to userspace
2755 * The resulting memory area is 32bit addressable and zeroed so it can be
2788 * To do safe access to this _mapped_ area, we need
2827 * To do safe access to this _mapped_ area, we need
2851 * vread() - read vmalloc area in a safe way.
2856 * This function checks that addr is a valid vmalloc'ed area, and
2857 * copies data from that area to a given buffer. If the given memory range
2859 * proper area of @buf. If there are memory holes, they'll be zero-filled.
2860 * An IOREMAP area is treated as a memory hole and no copy is done.
2863 * vm_struct area, returns 0. @buf should be a kernel buffer.
2866 * should know the vmalloc() area is valid and can use memcpy().
2867 * This is for routines which have to access the vmalloc area without
2872 * include any intersection with a valid vmalloc area
2911 else /* IOREMAP area is treated as memory hole */
2930 * vwrite() - write vmalloc area in a safe way.
2935 * This function checks that addr is a valid vmalloc'ed area, and
2938 * proper area of @buf. If there are memory holes, no copy is done into them.
2939 * An IOREMAP area is treated as a memory hole and no copy is done.
2942 * vm_struct area, returns 0. @buf should be a kernel buffer.
2945 * should know the vmalloc() area is valid and can use memcpy().
2946 * This is for routines which have to access the vmalloc area without
2951 * doesn't include any intersection with a valid vmalloc area
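Sketch modelled on the /proc/kcore and /dev/kmem callers of vread()/vwrite(); kbuf, vmalloc_addr and count are assumed. Holes (including IOREMAP ranges) are zero-filled on read and skipped on write:

	long n = vread(kbuf, (char *)vmalloc_addr, count);

	if (!n)
		return 0;	/* nothing in the range was a live vmalloc area */
	/* ... inspect or modify kbuf ... */
	vwrite(kbuf, (char *)vmalloc_addr, count);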
3009 * @size: size of map area
3013 * This function checks that @kaddr is a valid vmalloc'ed area,
3024 struct vm_struct *area;
3036 area = find_vm_area(kaddr);
3037 if (!area)
3040 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3044 end_index > get_vm_area_size(area))
3075 * This function checks that addr is a valid vmalloc'ed area, and
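Typical driver-side use of the userspace-mapping helpers documented above: back an mmap() handler with a vmalloc_user() buffer (zeroed and VM_USERMAP-tagged, as the earlier comments note) and hand it out with remap_vmalloc_range(); the names below are illustrative:

static void *drv_buf;	/* assumed: allocated with vmalloc_user() at probe time */

static int drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, drv_buf, vma->vm_pgoff);
}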
3090 void free_vm_area(struct vm_struct *area)
3093 ret = remove_vm_area(area->addr);
3094 BUG_ON(ret != area);
3095 kfree(area);
3109 * Returns: vmap_area if it is found. If there is no such area
3168 * @offsets: array containing offset of each area
3169 * @sizes: array containing size of each area
3186 * base address is pulled down to fit the area. Scanning is repeated till
3198 int area, area2, last_area, term_area;
3205 for (last_area = 0, area = 0; area < nr_vms; area++) {
3206 start = offsets[area];
3207 end = start + sizes[area];
3210 BUG_ON(!IS_ALIGNED(offsets[area], align));
3211 BUG_ON(!IS_ALIGNED(sizes[area], align));
3213 /* detect the area with the highest address */
3215 last_area = area;
3217 for (area2 = area + 1; area2 < nr_vms; area2++) {
3236 for (area = 0; area < nr_vms; area++) {
3237 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3238 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3239 if (!vas[area] || !vms[area])
3245 /* start scanning - we scan from the top, begin with the last area */
3246 area = term_area = last_area;
3247 start = offsets[area];
3248 end = start + sizes[area];
3273 term_area = area;
3283 term_area = area;
3288 * This area fits, move on to the previous one. If
3291 area = (area + nr_vms - 1) % nr_vms;
3292 if (area == term_area)
3295 start = offsets[area];
3296 end = start + sizes[area];
3301 for (area = 0; area < nr_vms; area++) {
3304 start = base + offsets[area];
3305 size = sizes[area];
3321 /* Allocated area. */
3322 va = vas[area];
3330 for (area = 0; area < nr_vms; area++) {
3331 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3334 kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3335 sizes[area]);
3340 for (area = 0; area < nr_vms; area++) {
3341 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3343 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3358 while (area--) {
3359 orig_start = vas[area]->va_start;
3360 orig_end = vas[area]->va_end;
3361 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3366 vas[area] = NULL;
3376 for (area = 0; area < nr_vms; area++) {
3377 if (vas[area])
3380 vas[area] = kmem_cache_zalloc(
3382 if (!vas[area])
3390 for (area = 0; area < nr_vms; area++) {
3391 if (vas[area])
3392 kmem_cache_free(vmap_area_cachep, vas[area]);
3394 kfree(vms[area]);
3408 for (area = 0; area < nr_vms; area++) {
3409 orig_start = vas[area]->va_start;
3410 orig_end = vas[area]->va_end;
3411 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3416 vas[area] = NULL;
3417 kfree(vms[area]);
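A hedged sketch of the in-tree usage pattern of this allocator (the percpu allocator's vmalloc-backed chunks): one congruent offset and one size per group, all carved out of a single base address found by the top-down scan described above; the values are illustrative:

	unsigned long offsets[2] = { 0, SZ_8M };
	unsigned long sizes[2]   = { SZ_2M, SZ_2M };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PMD_SIZE);
	if (!vms)
		return -ENOMEM;
	/* vms[i]->addr == base + offsets[i] for each group i */

	pcpu_free_vm_areas(vms, 2);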
3515 * of a vmap area being torn down or a vm_map_ram allocation.