Lines Matching refs:region
94 * region. This test is intentionally done in reverse order,
407 * initialise the percpu counter for VM and region record slabs
419 * validate the region tree
420 * - the caller must hold the region lock
425 struct vm_region *region, *last;
437 region = rb_entry(p, struct vm_region, vm_rb);
440 BUG_ON(region->vm_end <= region->vm_start);
441 BUG_ON(region->vm_top < region->vm_end);
442 BUG_ON(region->vm_start < last->vm_top);
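
The three BUG_ON() checks above spell out the invariants of the nommu region tree: every region has vm_start < vm_end <= vm_top, and when regions are visited in ascending vm_start order none may begin before the previous one's vm_top. A minimal userspace sketch of the same walk over a sorted array (check_regions() is a hypothetical helper, not from nommu.c):

    #include <assert.h>
    #include <stddef.h>

    struct region {              /* stand-in for struct vm_region */
        unsigned long vm_start;  /* first byte mapped */
        unsigned long vm_end;    /* byte after the last byte requested */
        unsigned long vm_top;    /* byte after the last byte allocated */
    };

    /* walk regions sorted by vm_start and assert the tree invariants */
    static void check_regions(const struct region *r, size_t n)
    {
        unsigned long last_top = 0;

        for (size_t i = 0; i < n; i++) {
            assert(r[i].vm_start < r[i].vm_end);  /* non-empty region */
            assert(r[i].vm_end <= r[i].vm_top);   /* allocation covers it */
            assert(r[i].vm_start >= last_top);    /* no overlap with previous */
            last_top = r[i].vm_top;
        }
    }

    int main(void)
    {
        struct region r[] = {
            { 0x1000, 0x1800, 0x2000 },
            { 0x2000, 0x3000, 0x3000 },
        };
        check_regions(r, 2);
        return 0;
    }
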
454 * add a region into the global tree
456 static void add_nommu_region(struct vm_region *region)
468 if (region->vm_start < pregion->vm_start)
470 else if (region->vm_start > pregion->vm_start)
472 else if (pregion == region)
478 rb_link_node(&region->vm_rb, parent, p);
479 rb_insert_color(&region->vm_rb, &nommu_region_tree);
485 * delete a region from the global tree
487 static void delete_nommu_region(struct vm_region *region)
492 rb_erase(&region->vm_rb, &nommu_region_tree);
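
add_nommu_region() descends the rb-tree comparing vm_start values, returns quietly if the same region is already present, and treats a second, distinct region at the same address as a bug; delete_nommu_region() simply erases the node. A rough standalone sketch of the same keyed insert, using a sorted singly-linked list instead of the kernel's rb-tree API (add_region() is hypothetical):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct region {                  /* stand-in for struct vm_region */
        unsigned long vm_start;
        struct region *next;
    };

    /* insert keyed on vm_start, mirroring the walk in add_nommu_region() */
    static void add_region(struct region **head, struct region *new)
    {
        struct region **p = head;

        while (*p && (*p)->vm_start < new->vm_start)
            p = &(*p)->next;
        if (*p == new)
            return;                                  /* already in the list */
        assert(!*p || (*p)->vm_start != new->vm_start);  /* duplicate start = bug */
        new->next = *p;
        *p = new;
    }

    int main(void)
    {
        struct region a = { 0x2000, NULL }, b = { 0x1000, NULL };
        struct region *head = NULL;

        add_region(&head, &a);
        add_region(&head, &b);
        for (struct region *r = head; r; r = r->next)
            printf("region at %#lx\n", r->vm_start);
        return 0;
    }
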
510 * release a reference to a region
511 * - the caller must hold the region semaphore for writing, which this releases
512 * - the region may not have been added to the tree yet, in which case vm_top
515 static void __put_nommu_region(struct vm_region *region)
520 if (--region->vm_usage == 0) {
521 if (region->vm_top > region->vm_start)
522 delete_nommu_region(region);
525 if (region->vm_file)
526 fput(region->vm_file);
530 if (region->vm_flags & VM_MAPPED_COPY)
531 free_page_series(region->vm_start, region->vm_top);
532 kmem_cache_free(vm_region_jar, region);
539 * release a reference to a region
541 static void put_nommu_region(struct vm_region *region)
544 __put_nommu_region(region);
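
__put_nommu_region() drops vm_usage and, on the final reference, unlinks the region (only if vm_top > vm_start, i.e. it was ever added to the tree), releases any file reference, frees the backing pages of VM_MAPPED_COPY mappings and returns the record to the slab. A compact userspace sketch of that release order, with free() standing in for free_page_series() and kmem_cache_free() (put_region() is hypothetical):

    #include <stdlib.h>

    /* stand-in for struct vm_region; for illustration only */
    struct region {
        int vm_usage;    /* reference count */
        void *backing;   /* plays the role of the VM_MAPPED_COPY pages */
    };

    /* drop one reference; on the last put, tear the region down in the
     * same order as __put_nommu_region() */
    static void put_region(struct region *region)
    {
        if (--region->vm_usage == 0) {
            /* delete_nommu_region() would unlink it from the tree here,
             * and fput() would drop the file reference for file mappings */
            free(region->backing);   /* free_page_series() equivalent */
            free(region);            /* kmem_cache_free() equivalent */
        }
    }

    int main(void)
    {
        struct region *r = calloc(1, sizeof(*r));

        r->vm_usage = 2;             /* e.g. two VMAs sharing one region */
        r->backing = malloc(4096);
        put_region(r);               /* first put: region stays alive */
        put_region(r);               /* last put: everything is freed */
        return 0;
    }
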
917 struct vm_region *region,
968 region->vm_flags = vma->vm_flags;
969 region->vm_start = (unsigned long) base;
970 region->vm_end = region->vm_start + len;
971 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
973 vma->vm_start = region->vm_start;
974 vma->vm_end = region->vm_start + len;
998 free_page_series(region->vm_start, region->vm_top);
999 region->vm_start = vma->vm_start = 0;
1000 region->vm_end = vma->vm_end = 0;
1001 region->vm_top = 0;
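
In do_mmap_private() the region is sized in whole pages (vm_top) while the VMA covers only the requested length (vm_end), so vm_top - vm_end is the slack at the tail of the allocation; on failure everything is reset to zero. A short sketch of that arithmetic, assuming 4 KiB pages (the kernel may actually grab a larger power-of-two block):

    #include <stdio.h>

    #define PAGE_SHIFT 12            /* assumes 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long base  = 0x100000;                            /* allocated pages */
        unsigned long len   = 5000;                                /* requested length */
        unsigned long total = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* pages obtained */

        unsigned long vm_start = base;
        unsigned long vm_end   = vm_start + len;                   /* what the VMA maps */
        unsigned long vm_top   = vm_start + (total << PAGE_SHIFT); /* what was allocated */

        printf("vm_start=%#lx vm_end=%#lx vm_top=%#lx slack=%lu bytes\n",
               vm_start, vm_end, vm_top, vm_top - vm_end);
        return 0;
    }
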
1025 struct vm_region *region;
1050 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1051 if (!region)
1058 region->vm_usage = 1;
1059 region->vm_flags = vm_flags;
1060 region->vm_pgoff = pgoff;
1066 region->vm_file = get_file(file);
1111 /* new mapping is not a subset of the region */
1117 /* we've found a region we can share */
1138 fput(region->vm_file);
1139 kmem_cache_free(vm_region_jar, region);
1140 region = pregion;
1166 vma->vm_start = region->vm_start = addr;
1167 vma->vm_end = region->vm_end = addr + len;
1172 vma->vm_region = region;
1175 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1180 ret = do_mmap_private(vma, region, len, capabilities);
1183 add_nommu_region(region);
1189 memset((void *)region->vm_start, 0,
1190 region->vm_end - region->vm_start);
1208 /* we flush the region from the icache only when the first executable
1210 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1211 flush_icache_user_range(region->vm_start, region->vm_end);
1212 region->vm_icache_flushed = true;
1223 if (region->vm_file)
1224 fput(region->vm_file);
1225 kmem_cache_free(vm_region_jar, region);
1238 kmem_cache_free(vm_region_jar, region);
1245 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
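
The two sharing comments above come from do_mmap(): an existing region can be reused only when the new mapping's page window lies entirely inside the pages that region already covers, in which case the freshly allocated region and its extra file reference are discarded and the shared region is used instead. A rough standalone version of that subset test (simplified from the kernel's pgoff arithmetic, so treat the exact bounds as an assumption):

    #include <stdbool.h>
    #include <stdio.h>

    /* does the requested window [pgoff, pgoff + pglen) fall entirely within
     * the existing region's window [vm_pgoff, vm_pgoff + rpglen)? */
    static bool mapping_is_subset(unsigned long vm_pgoff, unsigned long rpglen,
                                  unsigned long pgoff, unsigned long pglen)
    {
        return pgoff >= vm_pgoff && pgoff + pglen <= vm_pgoff + rpglen;
    }

    int main(void)
    {
        /* region covers file pages 0..16, request wants pages 4..8: shareable */
        printf("%d\n", mapping_is_subset(0, 16, 4, 4));
        /* request spills past the region: a new region is needed */
        printf("%d\n", mapping_is_subset(0, 16, 12, 8));
        return 0;
    }
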
1312 struct vm_region *region;
1317 * only a single usage on the region) */
1325 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1326 if (!region)
1334 *region = *vma->vm_region;
1335 new->vm_region = region;
1340 region->vm_top = region->vm_end = new->vm_end = addr;
1342 region->vm_start = new->vm_start = addr;
1343 region->vm_pgoff = new->vm_pgoff += npages;
1378 kmem_cache_free(vm_region_jar, region);
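
When a VMA is split, the copied region is trimmed around the split address: the lower half keeps its start but has vm_end and vm_top pulled down to addr, while the upper half begins at addr with vm_pgoff advanced by the number of pages skipped. A small sketch of that bookkeeping, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KiB pages */

    int main(void)
    {
        /* original region: [0x10000, 0x18000), file offset page 0 */
        unsigned long start = 0x10000, end = 0x18000, pgoff = 0;
        unsigned long addr  = 0x14000;                      /* split point */
        unsigned long npages = (addr - start) >> PAGE_SHIFT;

        /* lower half keeps the original start but is clipped at the split */
        printf("low : [%#lx, %#lx) pgoff=%lu\n", start, addr, pgoff);
        /* upper half starts at the split and skips npages of the file */
        printf("high: [%#lx, %#lx) pgoff=%lu\n", addr, end, pgoff + npages);
        return 0;
    }
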
1390 struct vm_region *region;
1404 /* cut the backing region down to size */
1405 region = vma->vm_region;
1406 BUG_ON(region->vm_usage != 1);
1409 delete_nommu_region(region);
1410 if (from > region->vm_start) {
1411 to = region->vm_top;
1412 region->vm_top = region->vm_end = from;
1414 region->vm_start = to;
1416 add_nommu_region(region);
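
The lines above cut a single-use backing region down to size for a partial unmap: the region is taken out of the tree while its bounds change, trimmed at the tail (vm_top and vm_end drop to `from`) or at the head (vm_start moves up to `to`), then re-inserted. A standalone sketch of just the trim decision, without the locking or the freeing of the released pages:

    #include <stdio.h>

    struct region { unsigned long vm_start, vm_end, vm_top; };

    /* trim the region for a partial unmap of [from, to); the caller has
     * already ensured the range touches exactly one end of the region */
    static void trim_region(struct region *r, unsigned long from, unsigned long to)
    {
        if (from > r->vm_start)
            r->vm_top = r->vm_end = from;   /* cut the tail off */
        else
            r->vm_start = to;               /* cut the head off */
    }

    int main(void)
    {
        struct region r = { 0x10000, 0x18000, 0x18000 };
        trim_region(&r, 0x14000, 0x18000);  /* unmap the last 16 KiB */
        printf("[%#lx, %#lx) top=%#lx\n", r.vm_start, r.vm_end, r.vm_top);
        return 0;
    }
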
1544 * as long as it stays within the region allocated by do_mmap_private() and the
1742 struct vm_region *region;
1773 region = vma->vm_region;
1774 r_size = region->vm_top - region->vm_start;
1775 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1778 region->vm_top -= r_top - newsize;
1779 if (region->vm_end > region->vm_top)
1780 region->vm_end = region->vm_top;
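
The final lines are from nommu_shrink_inode_mappings(): for each shared region the code works out how far into the file the region reaches (r_top = file offset plus region size) and, when the file is truncated below that, pulls vm_top back by the overshoot and clamps vm_end to it. A short arithmetic sketch, again assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KiB pages */

    struct region { unsigned long vm_start, vm_end, vm_top, vm_pgoff; };

    /* shrink a file-backed region so it no longer extends past newsize */
    static void shrink_for_truncate(struct region *r, unsigned long newsize)
    {
        unsigned long r_size = r->vm_top - r->vm_start;
        unsigned long r_top  = (r->vm_pgoff << PAGE_SHIFT) + r_size;

        if (r_top > newsize) {
            r->vm_top -= r_top - newsize;
            if (r->vm_end > r->vm_top)
                r->vm_end = r->vm_top;
        }
    }

    int main(void)
    {
        /* region maps file pages 2..6 (offset 8 KiB, 16 KiB long) */
        struct region r = { 0x10000, 0x14000, 0x14000, 2 };

        shrink_for_truncate(&r, 16384);     /* file truncated to 16 KiB */
        printf("vm_end=%#lx vm_top=%#lx\n", r.vm_end, r.vm_top);
        return 0;
    }
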