Lines Matching refs:vma (mm/nommu.c)
99 struct vm_area_struct *vma;
101 vma = find_vma(current->mm, (unsigned long)objp);
102 if (vma)
103 return vma->vm_end - vma->vm_start;
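The 99-103 hits look up the VMA that covers a pointer in the current task's address space and return the length of that mapping. A minimal sketch of the same find_vma() pattern follows; the helper name and the explicit mmap_read_lock() are illustrative additions, not part of this file.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Sketch of the find_vma() pattern above: return the length of the mapping
 * covering @addr in the current task, or 0 if no mapping covers it.
 */
static unsigned long mapping_size_of(unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long size = 0;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, addr);
	/* find_vma() returns the first VMA ending above addr; it may start above addr */
	if (vma && vma->vm_start <= addr)
		size = vma->vm_end - vma->vm_start;
	mmap_read_unlock(current->mm);

	return size;
}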
115 * @vma: memory mapping
123 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
126 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
172 struct vm_area_struct *vma;
175 vma = find_vma(current->mm, (unsigned long)ret);
176 if (vma)
177 vm_flags_set(vma, VM_USERMAP);
353 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
360 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
367 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
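vm_insert_page(), vm_map_pages() and vm_map_pages_zero() only appear here as stubs; on MMU kernels they back ->mmap handlers that expose driver-owned pages to userspace. A hedged sketch of the usual call site follows; my_page and page_mmap() are hypothetical driver code, not taken from this file.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver page, assumed to have been allocated with alloc_page(). */
static struct page *my_page;

/*
 * Illustrative ->mmap handler: place the single driver page at the start of
 * the user mapping with vm_insert_page().
 */
static int page_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start < PAGE_SIZE)
		return -EINVAL;

	return vm_insert_page(vma, vma->vm_start, my_page);
}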
547 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
549 vma->vm_mm = mm;
552 if (vma->vm_file) {
553 struct address_space *mapping = vma->vm_file->f_mapping;
557 vma_interval_tree_insert(vma, &mapping->i_mmap);
563 static void cleanup_vma_from_mm(struct vm_area_struct *vma)
565 vma->vm_mm->map_count--;
567 if (vma->vm_file) {
569 mapping = vma->vm_file->f_mapping;
573 vma_interval_tree_remove(vma, &mapping->i_mmap);
582 static int delete_vma_from_mm(struct vm_area_struct *vma)
584 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
586 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
587 if (vma_iter_prealloc(&vmi, vma)) {
588 pr_warn("Allocation of vma tree for process %d failed\n",
592 cleanup_vma_from_mm(vma);
601 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
603 if (vma->vm_ops && vma->vm_ops->close)
604 vma->vm_ops->close(vma);
605 if (vma->vm_file)
606 fput(vma->vm_file);
607 put_nommu_region(vma->vm_region);
608 vm_area_free(vma);
641 struct vm_area_struct *vma;
644 vma = vma_lookup(mm, addr);
645 if (!vma)
647 return vma;
654 int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
673 struct vm_area_struct *vma;
677 vma = vma_iter_load(&vmi);
678 if (!vma)
680 if (vma->vm_start != addr)
682 if (vma->vm_end != end)
685 return vma;
895 static int do_mmap_shared_file(struct vm_area_struct *vma)
899 ret = call_mmap(vma->vm_file, vma);
901 vma->vm_region->vm_top = vma->vm_region->vm_end;
916 static int do_mmap_private(struct vm_area_struct *vma,
932 ret = call_mmap(vma->vm_file, vma);
934 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
937 vma->vm_region->vm_top = vma->vm_region->vm_end;
967 vm_flags_set(vma, VM_MAPPED_COPY);
968 region->vm_flags = vma->vm_flags;
973 vma->vm_start = region->vm_start;
974 vma->vm_end = region->vm_start + len;
976 if (vma->vm_file) {
980 fpos = vma->vm_pgoff;
983 ret = kernel_read(vma->vm_file, base, len, &fpos);
992 vma_set_anonymous(vma);
999 region->vm_start = vma->vm_start = 0;
1000 region->vm_end = vma->vm_end = 0;
1024 struct vm_area_struct *vma;
1054 vma = vm_area_alloc(current->mm);
1055 if (!vma)
1062 vm_flags_init(vma, vm_flags);
1063 vma->vm_pgoff = pgoff;
1067 vma->vm_file = get_file(file);
1119 vma->vm_region = pregion;
1122 vma->vm_start = start;
1123 vma->vm_end = start + len;
1126 vm_flags_set(vma, VM_MAPPED_COPY);
1128 ret = do_mmap_shared_file(vma);
1130 vma->vm_region = NULL;
1131 vma->vm_start = 0;
1132 vma->vm_end = 0;
1166 vma->vm_start = region->vm_start = addr;
1167 vma->vm_end = region->vm_end = addr + len;
1172 vma->vm_region = region;
1177 if (file && vma->vm_flags & VM_SHARED)
1178 ret = do_mmap_shared_file(vma);
1180 ret = do_mmap_private(vma, region, len, capabilities);
1186 if (!vma->vm_file &&
1193 result = vma->vm_start;
1198 BUG_ON(!vma->vm_region);
1199 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1200 if (vma_iter_prealloc(&vmi, vma))
1203 setup_vma_to_mm(vma, current->mm);
1206 vma_iter_store(&vmi, vma);
1210 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1226 if (vma->vm_file)
1227 fput(vma->vm_file);
1228 vm_area_free(vma);
1239 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1305 * split a vma into two pieces at address 'addr', a new vma is allocated either
1308 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1318 if (vma->vm_file)
1321 mm = vma->vm_mm;
1329 new = vm_area_dup(vma);
1334 *region = *vma->vm_region;
1337 npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1347 if (vma_iter_prealloc(vmi, vma)) {
1348 pr_warn("Allocation of vma tree for process %d failed\n",
1357 delete_nommu_region(vma->vm_region);
1359 vma->vm_region->vm_start = vma->vm_start = addr;
1360 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1362 vma->vm_region->vm_end = vma->vm_end = addr;
1363 vma->vm_region->vm_top = addr;
1365 add_nommu_region(vma->vm_region);
1369 setup_vma_to_mm(vma, mm);
1387 struct vm_area_struct *vma,
1394 if (from > vma->vm_start) {
1395 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1397 vma->vm_end = from;
1399 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1401 vma->vm_start = to;
1405 region = vma->vm_region;
1431 struct vm_area_struct *vma;
1442 vma = vma_find(&vmi, end);
1443 if (!vma) {
1455 if (vma->vm_file) {
1457 if (start > vma->vm_start)
1459 if (end == vma->vm_end)
1461 vma = vma_find(&vmi, end);
1462 } while (vma);
1466 if (start == vma->vm_start && end == vma->vm_end)
1468 if (start < vma->vm_start || end > vma->vm_end)
1472 if (end != vma->vm_end && offset_in_page(end))
1474 if (start != vma->vm_start && end != vma->vm_end) {
1475 ret = split_vma(&vmi, vma, start, 1);
1479 return vmi_shrink_vma(&vmi, vma, start, end);
1483 if (delete_vma_from_mm(vma))
1486 delete_vma(mm, vma);
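The 1431-1486 hits are the unmap path: the VMA is split or shrunk when only part of it is being unmapped, then removed from the mm and freed. Kernel callers normally reach this through vm_munmap(), which operates on current->mm and takes the mmap write lock; the trivial wrapper below is purely illustrative, with hypothetical addr/len values.

#include <linux/mm.h>

/*
 * Illustrative only: vm_munmap() is the usual in-kernel entry into the unmap
 * path shown above.  @addr and @len would come from an earlier mapping call.
 */
static int drop_user_mapping(unsigned long addr, size_t len)
{
	return vm_munmap(addr, len);
}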
1513 struct vm_area_struct *vma;
1525 for_each_vma(vmi, vma) {
1526 cleanup_vma_from_mm(vma);
1527 delete_vma(mm, vma);
1553 struct vm_area_struct *vma;
1567 vma = find_vma_exact(current->mm, addr, old_len);
1568 if (!vma)
1571 if (vma->vm_end != vma->vm_start + old_len)
1574 if (is_nommu_shared_mapping(vma->vm_flags))
1577 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1581 vma->vm_end = vma->vm_start + new_len;
1582 return vma->vm_start;
1597 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1603 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1609 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
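In this file remap_pfn_range() only marks the VMA with VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP (the 1609 hit); on MMU kernels the same call also builds the page tables. Its classic call site is a character-device ->mmap handler; a hedged sketch follows, with MY_PHYS_BASE and phys_mmap() hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

#define MY_PHYS_BASE	0x10000000UL	/* hypothetical physical base of the device buffer */

/*
 * Illustrative ->mmap handler: map the requested window of a physically
 * contiguous device buffer into userspace.  A real driver would also check
 * the requested size and vm_pgoff against the buffer bounds.
 */
static int phys_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (MY_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}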
1614 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1617 unsigned long vm_len = vma->vm_end - vma->vm_start;
1619 pfn += vma->vm_pgoff;
1620 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
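vm_iomap_memory() (hits 1614-1620) is the simpler wrapper: the driver passes the physical base and length of its window and the helper derives the PFN and size from the VMA before calling io_remap_pfn_range(). A minimal hedged sketch, with MY_REGS_PHYS, MY_REGS_LEN and regs_mmap() hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

#define MY_REGS_PHYS	0x20000000UL	/* hypothetical register window base */
#define MY_REGS_LEN	0x1000UL	/* hypothetical register window length */

/* Illustrative ->mmap handler built on the vm_iomap_memory() wrapper. */
static int regs_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, MY_REGS_PHYS, MY_REGS_LEN);
}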
1624 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1627 unsigned int size = vma->vm_end - vma->vm_start;
1629 if (!(vma->vm_flags & VM_USERMAP))
1632 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1633 vma->vm_end = vma->vm_start + size;
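remap_vmalloc_range() (hits 1624-1633) refuses the mapping unless VM_USERMAP is set, which is the flag the 172-177 hits set on buffers coming from vmalloc_user(). A hedged sketch of the usual pairing in a driver follows; kbuf and vbuf_mmap() are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical buffer, assumed to have been allocated with vmalloc_user(). */
static void *kbuf;

/*
 * Illustrative ->mmap handler: expose the vmalloc_user() buffer to userspace.
 * remap_vmalloc_range() fails with -EINVAL if the buffer was not set up for
 * user mapping (the VM_USERMAP check in the hit above).
 */
static int vbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, kbuf, 0);
}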
1657 struct vm_area_struct *vma;
1664 vma = find_vma(mm, addr);
1665 if (vma) {
1667 if (addr + len >= vma->vm_end)
1668 len = vma->vm_end - addr;
1671 if (write && vma->vm_flags & VM_MAYWRITE)
1672 copy_to_user_page(vma, NULL, addr,
1674 else if (!write && vma->vm_flags & VM_MAYREAD)
1675 copy_from_user_page(vma, NULL, addr,
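The 1657-1675 hits copy directly between the caller's buffer and the target mapping with copy_to_user_page()/copy_from_user_page(), guarded by VM_MAYWRITE/VM_MAYREAD. Callers such as ptrace reach this code through access_process_vm(); the wrapper below is purely illustrative.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative only: read @len bytes of another task's memory through
 * access_process_vm(), as ptrace does when peeking data.
 */
static int peek_task_memory(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len)
{
	return access_process_vm(tsk, addr, buf, len, FOLL_FORCE);
}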
1741 struct vm_area_struct *vma;
1753 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1756 if (vma->vm_flags & VM_SHARED) {
1769 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1770 if (!(vma->vm_flags & VM_SHARED))
1773 region = vma->vm_region;