Lines Matching refs:region (mm/nommu.c)

95 	 * region. This test is intentionally done in reverse order,
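
Line 95 is from the comment inside kobjsize() that explains the ordering of its checks: the VMA lookup is only attempted for pointers that survived the cheaper page tests. A sketch of the surrounding function as it reads in recent kernels (older trees spell page_size() as PAGE_SIZE << compound_order(page)); illustrative rather than verbatim:

unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/* pointers that ksize() should not be applied to report size 0 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/* kmalloc() memory is identified by PageSlab */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * because we don't want to trace a segfault trying to access
	 * memory that may not exist.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/* otherwise report the size of the underlying (compound) page */
	return page_size(page);
}
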
417 * initialise the percpu counter for VM and region record slabs
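
The comment at 417 sits above mmap_init(). A minimal sketch of the initialisation it describes, assuming the vm_committed_as percpu counter and the vm_region_jar slab pointer declared elsewhere in nommu.c (slab flags may differ between kernel versions):

/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	/* counter used for memory-overcommit accounting */
	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);

	/* slab cache backing every struct vm_region record */
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC | SLAB_ACCOUNT);
}
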
429 * validate the region tree
430 * - the caller must hold the region lock
435 struct vm_region *region, *last;
447 region = rb_entry(p, struct vm_region, vm_rb);
450 BUG_ON(region->vm_end <= region->vm_start);
451 BUG_ON(region->vm_top < region->vm_end);
452 BUG_ON(region->vm_start < last->vm_top);
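
Lines 429-452 belong to validate_nommu_regions(), which walks the global rbtree in order and BUGs if any region is empty, has vm_top below vm_end, or overlaps its in-order predecessor. A sketch of that walk, assuming the standard <linux/rbtree.h> iterators and the vm_rb node embedded in struct vm_region:

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	/* the first region only has to be internally consistent */
	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		/* non-empty, vm_top covers vm_end, and no overlap with the
		 * previous region's allocation */
		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
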
464 * add a region into the global tree
466 static void add_nommu_region(struct vm_region *region)
478 if (region->vm_start < pregion->vm_start)
480 else if (region->vm_start > pregion->vm_start)
482 else if (pregion == region)
488 rb_link_node(&region->vm_rb, parent, p);
489 rb_insert_color(&region->vm_rb, &nommu_region_tree);
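
Lines 464-489 are add_nommu_region(): a plain rbtree insertion keyed on vm_start, with the twist that re-inserting the same region is tolerated while two distinct regions sharing a start address is a bug. A sketch of the insertion, under the same assumptions as above:

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	/* standard rbtree descent keyed on vm_start */
	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;			/* already in the tree */
		else
			BUG();			/* duplicate start address */
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);
}
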
495 * delete a region from the global tree
497 static void delete_nommu_region(struct vm_region *region)
502 rb_erase(&region->vm_rb, &nommu_region_tree);
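
Lines 495-502 are the converse, delete_nommu_region(), bracketed by the same validation pass. A sketch, again assuming the caller holds the region lock:

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}
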
520 * release a reference to a region
521 * - the caller must hold the region semaphore for writing, which this releases
522 * - the region may not have been added to the tree yet, in which case vm_top
525 static void __put_nommu_region(struct vm_region *region)
530 if (--region->vm_usage == 0) {
531 if (region->vm_top > region->vm_start)
532 delete_nommu_region(region);
535 if (region->vm_file)
536 fput(region->vm_file);
540 if (region->vm_flags & VM_MAPPED_COPY)
541 free_page_series(region->vm_start, region->vm_top);
542 kmem_cache_free(vm_region_jar, region);
549 * release a reference to a region
551 static void put_nommu_region(struct vm_region *region)
554 __put_nommu_region(region);
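
Lines 520-554 are the two reference-release helpers. __put_nommu_region() drops a reference while nommu_region_sem is held for writing, removes the region from the tree if it ever made it there, and releases the backing pages only for VM_MAPPED_COPY regions (free_page_series() is nommu.c's own page-release helper). A sketch of the pair:

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_end
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}
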
976 struct vm_region *region,
1023 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1024 region->vm_start = (unsigned long) base;
1025 region->vm_end = region->vm_start + len;
1026 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
1028 vma->vm_start = region->vm_start;
1029 vma->vm_end = region->vm_start + len;
1053 free_page_series(region->vm_start, region->vm_top);
1054 region->vm_start = vma->vm_start = 0;
1055 region->vm_end = vma->vm_end = 0;
1056 region->vm_top = 0;
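
Lines 976-1056 are from do_mmap_private(), which backs a MAP_PRIVATE (or otherwise un-shareable) mapping with a freshly allocated, physically contiguous buffer and then copies any file contents into it. The following is a condensed sketch only: the real function first tries the driver's ->mmap() when NOMMU_MAP_DIRECT is set, and allocates an order-sized block whose excess pages are handed back, whereas alloc_pages_exact() is used here purely for brevity:

static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total;
	void *base;
	ssize_t ret;

	/* allocate a contiguous buffer to back the mapping */
	len = PAGE_ALIGN(len);
	total = len >> PAGE_SHIFT;
	base = alloc_pages_exact(len, GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* the mapping owns a private copy of the data */
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	/* a file-backed mapping has its contents read into the copy */
	if (vma->vm_file) {
		loff_t fpos = (loff_t) vma->vm_pgoff << PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the tail after a short read */
		if (ret < len)
			memset(base + ret, 0, len - ret);
	}

	return 0;

error_free:
	/* unwind as lines 1053-1056 above show */
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end = vma->vm_end = 0;
	region->vm_top = 0;
	return ret;
}
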
1079 struct vm_region *region;
1103 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1104 if (!region)
1111 region->vm_usage = 1;
1112 region->vm_flags = vm_flags;
1113 region->vm_pgoff = pgoff;
1119 region->vm_file = get_file(file);
1164 /* new mapping is not a subset of the region */
1170 /* we've found a region we can share */
1191 fput(region->vm_file);
1192 kmem_cache_free(vm_region_jar, region);
1193 region = pregion;
1219 vma->vm_start = region->vm_start = addr;
1220 vma->vm_end = region->vm_end = addr + len;
1225 vma->vm_region = region;
1228 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1233 ret = do_mmap_private(vma, region, len, capabilities);
1236 add_nommu_region(region);
1242 memset((void *)region->vm_start, 0,
1243 region->vm_end - region->vm_start);
1253 /* we flush the region from the icache only when the first executable
1255 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1256 flush_icache_user_range(region->vm_start, region->vm_end);
1257 region->vm_icache_flushed = true;
1267 if (region->vm_file)
1268 fput(region->vm_file);
1269 kmem_cache_free(vm_region_jar, region);
1282 kmem_cache_free(vm_region_jar, region);
1289 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
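
Lines 1079-1289 are scattered through do_mmap(), where the region lifecycle is: allocate and initialise a record, try to share an already-mapped region for shared file mappings, otherwise build a new mapping and insert the region into the global tree, flushing the icache the first time the region is mapped executable. The function below is a hypothetical, heavily condensed stand-in (note the invented name and signature); argument checking, VMA setup, the NOMMU_MAP_DIRECT negotiation with the driver and the precise overlap/subset tests of lines 1164-1170 are all elided:

static unsigned long do_mmap_region_bookkeeping(struct file *file,
						unsigned long addr,
						unsigned long len,
						vm_flags_t vm_flags,
						unsigned long pgoff,
						unsigned long capabilities,
						struct vm_area_struct *vma)
{
	struct vm_region *region, *pregion;
	struct rb_node *rb;
	int ret;

	/* every mapping gets a reference-counted region record */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;
	if (file)
		region->vm_file = get_file(file);

	down_write(&nommu_region_sem);

	/* a shared file mapping first looks for an existing region covering
	 * the same part of the same file that it can piggy-back on */
	if (file && (vm_flags & VM_MAYSHARE)) {
		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;
			if (file_inode(pregion->vm_file) != file_inode(file))
				continue;
			/* (page-offset overlap and subset checks elided) */

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			vma->vm_start = pregion->vm_start +
				((pgoff - pregion->vm_pgoff) << PAGE_SHIFT);
			vma->vm_end = vma->vm_start + len;

			/* drop the record we allocated speculatively */
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			goto share;
		}
	}

	/* set up a new mapping: for a direct device mapping 'addr' comes from
	 * the driver, for a private copy do_mmap_private() overwrites these
	 * addresses with the allocated buffer */
	vma->vm_start = region->vm_start = addr;
	vma->vm_end = region->vm_end = addr + len;
	vma->vm_region = region;

	ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* (an anonymous mapping that did not ask for uninitialized data is
	 * zeroed here, as lines 1242-1243 above show) */

share:
	up_write(&nommu_region_sem);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_user_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}
	return vma->vm_start;

error_just_free:
	up_write(&nommu_region_sem);
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	return ret;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
		len, current->pid);
	return -ENOMEM;
}
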
1358 struct vm_region *region;
1362 * only a single usage on the region) */
1369 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1370 if (!region)
1375 kmem_cache_free(vm_region_jar, region);
1380 *region = *vma->vm_region;
1381 new->vm_region = region;
1386 region->vm_top = region->vm_end = new->vm_end = addr;
1388 region->vm_start = new->vm_start = addr;
1389 region->vm_pgoff = new->vm_pgoff += npages;
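
Lines 1358-1389 are from split_vma() (the nommu variant), which may only split anonymous, single-user regions: the new VMA gets its own copy of the region record, then both records have their boundaries pulled in to the split address before being re-inserted into the tree. A condensed sketch (the map-count check and the VMA list/tree bookkeeping are elided):

static int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same; copy all, then fix up the boundaries */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	/* shrink the old region to match and re-insert both in the tree */
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);

	return 0;
}
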
1421 struct vm_region *region;
1432 /* cut the backing region down to size */
1433 region = vma->vm_region;
1434 BUG_ON(region->vm_usage != 1);
1437 delete_nommu_region(region);
1438 if (from > region->vm_start) {
1439 to = region->vm_top;
1440 region->vm_top = region->vm_end = from;
1442 region->vm_start = to;
1444 add_nommu_region(region);
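
Lines 1421-1444 are the region side of shrink_vma(), used by munmap() when only the head or the tail of a mapping is unmapped. Because the region must not be shared (vm_usage == 1), it can simply be taken out of the tree, have one end pulled in, and be re-inserted, after which the freed page run is released. A sketch of that part (resizing the VMA itself is elided):

static int shrink_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* (the VMA's own vm_start/vm_end are adjusted first; elided) */

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		/* trimming the tail: everything from 'from' to vm_top goes */
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		/* trimming the head: the region now starts at 'to' */
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	/* release the pages in [from, to) back to the allocator */
	free_page_series(from, to);
	return 0;
}
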
1563 * as long as it stays within the region allocated by do_mmap_private() and the
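
Line 1563 is from the comment above the nommu do_mremap(): a mapping may only be resized in place, and only while the new length still fits inside the region that do_mmap_private() originally allocated. A sketch of those checks, assuming nommu.c's local find_vma_exact() helper (newer kernels spell the shareability test differently):

static unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0 || offset_in_page(addr))
		return (unsigned long) -EINVAL;

	/* MREMAP_FIXED is not supported: the mapping cannot move */
	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	/* shareable mappings may be mapped directly out of a device or the
	 * pagecache and cannot be resized */
	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	/* the new size must still fit inside the backing region */
	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
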
1766 struct vm_region *region;
1797 region = vma->vm_region;
1798 r_size = region->vm_top - region->vm_start;
1799 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1802 region->vm_top -= r_top - newsize;
1803 if (region->vm_end > region->vm_top)
1804 region->vm_end = region->vm_top;
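
Lines 1766-1804 are from nommu_shrink_inode_mappings(), which runs when a file that is currently mapped is truncated: every shared region extending past the new end-of-file has vm_top (and, if necessary, vm_end) pulled back so the now-dead tail can no longer be handed out. A sketch of just the trimming loop, wrapped here in a hypothetical helper name, since the real function also first scans for private copies overlapping the dead zone and fails the truncation with -ETXTBSY if it finds one:

static void shrink_regions_past_eof(struct inode *inode, size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long r_size, r_top;

	i_mmap_lock_read(inode->i_mapping);

	/* reduce any regions that overlap the dead zone; these are pointed
	 * to by VMAs that don't themselves overlap it */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
}
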