Lines matching refs:vma in mm/nommu.c
100 struct vm_area_struct *vma;
102 vma = find_vma(current->mm, (unsigned long)objp);
103 if (vma)
104 return vma->vm_end - vma->vm_start;
116 * @vma: memory mapping
124 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
173 struct vm_area_struct *vma;
176 vma = find_vma(current->mm, (unsigned long)ret);
177 if (vma)
178 vma->vm_flags |= VM_USERMAP;
363 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
370 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
377 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
563 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
569 BUG_ON(!vma->vm_region);
572 vma->vm_mm = mm;
575 if (vma->vm_file) {
576 mapping = vma->vm_file->f_mapping;
580 vma_interval_tree_insert(vma, &mapping->i_mmap);
594 if (vma->vm_start < pvma->vm_start)
596 else if (vma->vm_start > pvma->vm_start) {
599 } else if (vma->vm_end < pvma->vm_end)
601 else if (vma->vm_end > pvma->vm_end) {
604 } else if (vma < pvma)
606 else if (vma > pvma) {
613 rb_link_node(&vma->vm_rb, parent, p);
614 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
621 __vma_link_list(mm, vma, prev);
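
The add_vma_to_mm() fragment above orders VMAs in the mm rbtree by vm_start, then by vm_end, and finally by the VMA's own address as a tie-break. A minimal stand-alone sketch of that comparison, using toy types rather than the kernel's vm_area_struct (names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for vm_area_struct; illustrative only. */
struct toy_vma {
	uintptr_t vm_start;	/* first byte of the mapping */
	uintptr_t vm_end;	/* one byte past the end */
};

/* Same ordering the rbtree insertion above walks out by hand. */
static int toy_vma_cmp(const struct toy_vma *a, const struct toy_vma *b)
{
	if (a->vm_start != b->vm_start)
		return a->vm_start < b->vm_start ? -1 : 1;
	if (a->vm_end != b->vm_end)
		return a->vm_end < b->vm_end ? -1 : 1;
	if (a == b)
		return 0;
	return (uintptr_t)a < (uintptr_t)b ? -1 : 1;
}

int main(void)
{
	struct toy_vma x = { 0x1000, 0x2000 }, y = { 0x1000, 0x3000 };

	printf("%d\n", toy_vma_cmp(&x, &y));	/* -1: same start, shorter end */
	return 0;
}
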
627 static void delete_vma_from_mm(struct vm_area_struct *vma)
631 struct mm_struct *mm = vma->vm_mm;
636 /* if the vma is cached, invalidate the entire cache */
637 if (curr->vmacache.vmas[i] == vma) {
644 if (vma->vm_file) {
645 mapping = vma->vm_file->f_mapping;
649 vma_interval_tree_remove(vma, &mapping->i_mmap);
655 rb_erase(&vma->vm_rb, &mm->mm_rb);
657 __vma_unlink_list(mm, vma);
663 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
665 if (vma->vm_ops && vma->vm_ops->close)
666 vma->vm_ops->close(vma);
667 if (vma->vm_file)
668 fput(vma->vm_file);
669 put_nommu_region(vma->vm_region);
670 vm_area_free(vma);
679 struct vm_area_struct *vma;
682 vma = vmacache_find(mm, addr);
683 if (likely(vma))
684 return vma;
688 for (vma = mm->mmap; vma; vma = vma->vm_next) {
689 if (vma->vm_start > addr)
691 if (vma->vm_end > addr) {
692 vmacache_update(addr, vma);
693 return vma;
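
The find_vma() fragment above first tries the per-task VMA cache and otherwise walks mm->mmap linearly, giving up as soon as a VMA starts beyond the address and returning the first VMA whose end lies above it. A small user-space sketch of that walk over a toy sorted list (assumed types, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct toy_vma {
	unsigned long vm_start, vm_end;		/* covers [vm_start, vm_end) */
	struct toy_vma *vm_next;
};

/* Linear lookup mirroring the nommu find_vma() walk shown above. */
static struct toy_vma *toy_find_vma(struct toy_vma *head, unsigned long addr)
{
	for (struct toy_vma *vma = head; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;	/* list is sorted, so we passed addr */
		if (vma->vm_end > addr)
			return vma;	/* addr falls inside this VMA */
	}
	return NULL;
}

int main(void)
{
	struct toy_vma b = { 0x3000, 0x5000, NULL };
	struct toy_vma a = { 0x1000, 0x2000, &b };

	printf("%p\n", (void *)toy_find_vma(&a, 0x3004));	/* &b */
	printf("%p\n", (void *)toy_find_vma(&a, 0x2800));	/* NULL: hole */
	return 0;
}
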
714 int expand_stack(struct vm_area_struct *vma, unsigned long address)
727 struct vm_area_struct *vma;
731 vma = vmacache_find_exact(mm, addr, end);
732 if (vma)
733 return vma;
737 for (vma = mm->mmap; vma; vma = vma->vm_next) {
738 if (vma->vm_start < addr)
740 if (vma->vm_start > addr)
742 if (vma->vm_end == end) {
743 vmacache_update(addr, vma);
744 return vma;
954 static int do_mmap_shared_file(struct vm_area_struct *vma)
958 ret = call_mmap(vma->vm_file, vma);
960 vma->vm_region->vm_top = vma->vm_region->vm_end;
975 static int do_mmap_private(struct vm_area_struct *vma,
989 ret = call_mmap(vma->vm_file, vma);
992 BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
993 vma->vm_region->vm_top = vma->vm_region->vm_end;
1023 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1028 vma->vm_start = region->vm_start;
1029 vma->vm_end = region->vm_start + len;
1031 if (vma->vm_file) {
1035 fpos = vma->vm_pgoff;
1038 ret = kernel_read(vma->vm_file, base, len, &fpos);
1047 vma_set_anonymous(vma);
1054 region->vm_start = vma->vm_start = 0;
1055 region->vm_end = vma->vm_end = 0;
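
Without an MMU there is no demand paging or copy-on-write, so do_mmap_private() above backs a private file mapping with an ordinary anonymous allocation and copies the file contents into it with kernel_read(). As a rough user-space analogy of what such a mapping amounts to (an illustrative sketch, not the kernel path; the file path is just an example):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* "Map" a file privately by allocating a buffer and reading it up front. */
static void *private_map_by_copy(const char *path, size_t len, off_t off)
{
	void *buf = calloc(1, len);
	int fd = open(path, O_RDONLY);

	if (!buf || fd < 0 || pread(fd, buf, len, off) < 0) {
		perror("private_map_by_copy");
		free(buf);
		buf = NULL;
	}
	if (fd >= 0)
		close(fd);
	return buf;
}

int main(void)
{
	char *p = private_map_by_copy("/etc/hostname", 64, 0);

	if (p) {
		printf("%.64s", p);
		free(p);
	}
	return 0;
}
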
1078 struct vm_area_struct *vma;
1107 vma = vm_area_alloc(current->mm);
1108 if (!vma)
1115 vma->vm_flags = vm_flags;
1116 vma->vm_pgoff = pgoff;
1120 vma->vm_file = get_file(file);
1172 vma->vm_region = pregion;
1175 vma->vm_start = start;
1176 vma->vm_end = start + len;
1179 vma->vm_flags |= VM_MAPPED_COPY;
1181 ret = do_mmap_shared_file(vma);
1183 vma->vm_region = NULL;
1184 vma->vm_start = 0;
1185 vma->vm_end = 0;
1219 vma->vm_start = region->vm_start = addr;
1220 vma->vm_end = region->vm_end = addr + len;
1225 vma->vm_region = region;
1230 if (file && vma->vm_flags & VM_SHARED)
1231 ret = do_mmap_shared_file(vma);
1233 ret = do_mmap_private(vma, region, len, capabilities);
1239 if (!vma->vm_file &&
1246 result = vma->vm_start;
1251 add_vma_to_mm(current->mm, vma);
1255 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1270 if (vma->vm_file)
1271 fput(vma->vm_file);
1272 vm_area_free(vma);
1283 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1351 * split a vma into two pieces at address 'addr', a new vma is allocated either
1354 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1363 if (vma->vm_file)
1373 new = vm_area_dup(vma);
1380 *region = *vma->vm_region;
1383 npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1395 delete_vma_from_mm(vma);
1397 delete_nommu_region(vma->vm_region);
1399 vma->vm_region->vm_start = vma->vm_start = addr;
1400 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1402 vma->vm_region->vm_end = vma->vm_end = addr;
1403 vma->vm_region->vm_top = addr;
1405 add_nommu_region(vma->vm_region);
1408 add_vma_to_mm(mm, vma);
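
split_vma() above duplicates the VMA and its region, then fixes up the boundaries: the piece that ends up above 'addr' has its start and page offset advanced by npages, while the other piece simply has its end pulled back to 'addr'. A minimal sketch of that boundary arithmetic on a toy range (illustrative names, 4 KiB pages assumed):

#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

struct toy_range {
	unsigned long start, end;	/* byte range [start, end) */
	unsigned long pgoff;		/* offset into the backing object, in pages */
};

/* Split r at page-aligned addr; 'hi' becomes the upper piece, r keeps the lower. */
static void toy_split(struct toy_range *r, unsigned long addr, struct toy_range *hi)
{
	unsigned long npages = (addr - r->start) >> TOY_PAGE_SHIFT;

	*hi = *r;
	hi->start = addr;
	hi->pgoff += npages;
	r->end = addr;
}

int main(void)
{
	struct toy_range r = { 0x10000, 0x18000, 4 }, hi;

	toy_split(&r, 0x14000, &hi);
	assert(r.end == 0x14000 && hi.start == 0x14000 && hi.pgoff == 8);
	printf("low [%#lx,%#lx), high [%#lx,%#lx) pgoff %lu\n",
	       r.start, r.end, hi.start, hi.end, hi.pgoff);
	return 0;
}
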
1418 struct vm_area_struct *vma,
1425 delete_vma_from_mm(vma);
1426 if (from > vma->vm_start)
1427 vma->vm_end = from;
1429 vma->vm_start = to;
1430 add_vma_to_mm(mm, vma);
1433 region = vma->vm_region;
1458 struct vm_area_struct *vma;
1469 vma = find_vma(mm, start);
1470 if (!vma) {
1482 if (vma->vm_file) {
1484 if (start > vma->vm_start)
1486 if (end == vma->vm_end)
1488 vma = vma->vm_next;
1489 } while (vma);
1493 if (start == vma->vm_start && end == vma->vm_end)
1495 if (start < vma->vm_start || end > vma->vm_end)
1499 if (end != vma->vm_end && offset_in_page(end))
1501 if (start != vma->vm_start && end != vma->vm_end) {
1502 ret = split_vma(mm, vma, start, 1);
1506 return shrink_vma(mm, vma, start, end);
1510 delete_vma_from_mm(vma);
1511 delete_vma(mm, vma);
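
do_munmap() above drops a whole VMA when the requested range matches it exactly; a partial range has to be page-aligned and is handled by split_vma() or shrink_vma(), and file-backed VMAs may not be split at all. A generic user-space illustration (ordinary Linux, not nommu-specific) of a partial unmap that exercises the shrink case:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Unmap only the second page: start != vm_start but end == vm_end,
	 * so the kernel shrinks the existing VMA instead of splitting it. */
	if (munmap(p + page, page) != 0)
		perror("munmap");

	p[0] = 'x';		/* the first page remains mapped */
	munmap(p, page);
	return 0;
}
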
1538 struct vm_area_struct *vma;
1545 while ((vma = mm->mmap)) {
1546 mm->mmap = vma->vm_next;
1547 delete_vma_from_mm(vma);
1548 delete_vma(mm, vma);
1572 struct vm_area_struct *vma;
1586 vma = find_vma_exact(current->mm, addr, old_len);
1587 if (!vma)
1590 if (vma->vm_end != vma->vm_start + old_len)
1593 if (vma->vm_flags & VM_MAYSHARE)
1596 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1600 vma->vm_end = vma->vm_start + new_len;
1601 return vma->vm_start;
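
do_mremap() above only resizes in place: the VMA must exactly cover the old range, shared mappings are rejected, the new length must still fit inside the backing region, and only vm_end is adjusted, so nothing ever moves. From user space that means a plain mremap() without MREMAP_MAYMOVE is the pattern that can be expected to work; a hedged example (ordinary Linux/glibc, not nommu-specific):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Shrink in place; without MREMAP_MAYMOVE the mapping may not relocate. */
	void *q = mremap(p, 4 * page, 2 * page, 0);
	if (q == MAP_FAILED) {
		perror("mremap");
		munmap(p, 4 * page);
		return 1;
	}

	printf("still at %p, now two pages\n", q);
	munmap(q, 2 * page);
	return 0;
}
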
1616 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1622 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1628 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
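
On nommu, remap_pfn_range() above mainly records the VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP flags, the first two of which are what follow_pfn() earlier tests for. A hypothetical driver-side sketch of the usual call site (the handler name and physical base are assumptions, not from the source):

#include <linux/fs.h>
#include <linux/mm.h>

#define DEMO_PHYS_BASE 0x40000000UL	/* assumed device register base */

/* Hypothetical character-device mmap handler. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Marks the VMA VM_IO | VM_PFNMAP as a side effect. */
	return remap_pfn_range(vma, vma->vm_start,
			       DEMO_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
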
1633 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1636 unsigned long vm_len = vma->vm_end - vma->vm_start;
1638 pfn += vma->vm_pgoff;
1639 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1643 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1646 unsigned int size = vma->vm_end - vma->vm_start;
1648 if (!(vma->vm_flags & VM_USERMAP))
1651 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1652 vma->vm_end = vma->vm_start + size;
1681 struct vm_area_struct *vma;
1688 vma = find_vma(mm, addr);
1689 if (vma) {
1691 if (addr + len >= vma->vm_end)
1692 len = vma->vm_end - addr;
1695 if (write && vma->vm_flags & VM_MAYWRITE)
1696 copy_to_user_page(vma, NULL, addr,
1698 else if (!write && vma->vm_flags & VM_MAYREAD)
1699 copy_from_user_page(vma, NULL, addr,
1765 struct vm_area_struct *vma;
1777 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1780 if (vma->vm_flags & VM_SHARED) {
1793 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1794 if (!(vma->vm_flags & VM_SHARED))
1797 region = vma->vm_region;