Cross-reference listing: lines matching identifier "va" (each entry is "line-number: source text" quoted from mm/vmalloc.c)
758 * All vmap_area objects in this tree are sorted by va->va_start
776 va_size(struct vmap_area *va)
778 return (va->va_end - va->va_start);
784 struct vmap_area *va;
786 va = rb_entry_safe(node, struct vmap_area, rb_node);
787 return va ? va->subtree_max_size : 0;
808 struct vmap_area *va = NULL;
818 va = tmp;
827 return va;
837 struct vmap_area *va;
839 va = rb_entry(n, struct vmap_area, rb_node);
840 if (addr < va->va_start)
842 else if (addr >= va->va_end)
845 return va;
860 find_va_links(struct vmap_area *va,
880 * it link, where the new va->rb_node will be attached to.
890 if (va->va_end <= tmp_va->va_start)
892 else if (va->va_start >= tmp_va->va_end)
896 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
925 __link_va(struct vmap_area *va, struct rb_root *root,
940 rb_link_node(&va->rb_node, parent, link);
944 * to the tree. We do not set va->subtree_max_size to
953 rb_insert_augmented(&va->rb_node,
955 va->subtree_max_size = 0;
957 rb_insert_color(&va->rb_node, root);
961 list_add(&va->list, head);
965 link_va(struct vmap_area *va, struct rb_root *root,
969 __link_va(va, root, parent, link, head, false);
973 link_va_augment(struct vmap_area *va, struct rb_root *root,
977 __link_va(va, root, parent, link, head, true);
981 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
983 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
987 rb_erase_augmented(&va->rb_node,
990 rb_erase(&va->rb_node, root);
992 list_del_init(&va->list);
993 RB_CLEAR_NODE(&va->rb_node);
997 unlink_va(struct vmap_area *va, struct rb_root *root)
999 __unlink_va(va, root, false);
1003 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1005 __unlink_va(va, root, true);
1013 compute_subtree_max_size(struct vmap_area *va)
1015 return max3(va_size(va),
1016 get_subtree_max_size(va->rb_node.rb_left),
1017 get_subtree_max_size(va->rb_node.rb_right));
1023 struct vmap_area *va;
1026 list_for_each_entry(va, &free_vmap_area_list, list) {
1027 computed_size = compute_subtree_max_size(va);
1028 if (computed_size != va->subtree_max_size)
1030 va_size(va), va->subtree_max_size);
1063 augment_tree_propagate_from(struct vmap_area *va)
1070 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1078 insert_vmap_area(struct vmap_area *va,
1084 link = find_va_links(va, root, NULL, &parent);
1086 link_va(va, root, parent, link, head);
1090 insert_vmap_area_augment(struct vmap_area *va,
1098 link = find_va_links(va, NULL, from, &parent);
1100 link = find_va_links(va, root, NULL, &parent);
1103 link_va_augment(va, root, parent, link, head);
1104 augment_tree_propagate_from(va);
1120 __merge_or_add_vmap_area(struct vmap_area *va,
1133 link = find_va_links(va, root, NULL, &parent);
1153 if (sibling->va_start == va->va_end) {
1154 sibling->va_start = va->va_start;
1157 kmem_cache_free(vmap_area_cachep, va);
1160 va = sibling;
1174 if (sibling->va_end == va->va_start) {
1183 __unlink_va(va, root, augment);
1185 sibling->va_end = va->va_end;
1188 kmem_cache_free(vmap_area_cachep, va);
1191 va = sibling;
1198 __link_va(va, root, parent, link, head, augment);
1200 return va;
1204 merge_or_add_vmap_area(struct vmap_area *va,
1207 return __merge_or_add_vmap_area(va, root, head, false);
1211 merge_or_add_vmap_area_augment(struct vmap_area *va,
1214 va = __merge_or_add_vmap_area(va, root, head, true);
1215 if (va)
1216 augment_tree_propagate_from(va);
1218 return va;
1222 is_within_this_va(struct vmap_area *va, unsigned long size,
1227 if (va->va_start > vstart)
1228 nva_start_addr = ALIGN(va->va_start, align);
1237 return (nva_start_addr + size <= va->va_end);
1251 struct vmap_area *va;
1262 va = rb_entry(node, struct vmap_area, rb_node);
1265 vstart < va->va_start) {
1268 if (is_within_this_va(va, size, align, vstart))
1269 return va;
1288 va = rb_entry(node, struct vmap_area, rb_node);
1289 if (is_within_this_va(va, size, align, vstart))
1290 return va;
1293 vstart <= va->va_start) {
1300 vstart = va->va_start + 1;
1318 struct vmap_area *va;
1320 list_for_each_entry(va, head, list) {
1321 if (!is_within_this_va(va, size, align, vstart))
1324 return va;
1359 classify_va_fit_type(struct vmap_area *va,
1365 if (nva_start_addr < va->va_start ||
1366 nva_start_addr + size > va->va_end)
1370 if (va->va_start == nva_start_addr) {
1371 if (va->va_end == nva_start_addr + size)
1375 } else if (va->va_end == nva_start_addr + size) {
1386 struct vmap_area *va, unsigned long nva_start_addr,
1390 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1400 unlink_va_augment(va, root);
1401 kmem_cache_free(vmap_area_cachep, va);
1410 va->va_start += size;
1419 va->va_end = nva_start_addr;
1463 lva->va_start = va->va_start;
1469 va->va_start = nva_start_addr + size;
1475 augment_tree_propagate_from(va);
1478 insert_vmap_area_augment(lva, &va->rb_node, root, head);
1495 struct vmap_area *va;
1510 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1511 if (unlikely(!va))
1514 if (va->va_start > vstart)
1515 nva_start_addr = ALIGN(va->va_start, align);
1524 ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
1538 static void free_vmap_area(struct vmap_area *va)
1544 unlink_va(va, &vmap_area_root);
1551 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1558 struct vmap_area *va = NULL;
1570 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1574 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1575 kmem_cache_free(vmap_area_cachep, va);
1588 struct vmap_area *va;
1603 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1604 if (unlikely(!va))
1611 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1628 va->va_start = addr;
1629 va->va_end = addr + size;
1630 va->vm = NULL;
1631 va->flags = va_flags;
1634 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1637 BUG_ON(!IS_ALIGNED(va->va_start, align));
1638 BUG_ON(va->va_start < vstart);
1639 BUG_ON(va->va_end > vend);
1643 free_vmap_area(va);
1647 return va;
1668 kmem_cache_free(vmap_area_cachep, va);
1729 struct vmap_area *va, *n_va;
1753 list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1754 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1755 unsigned long orig_start = va->va_start;
1756 unsigned long orig_end = va->va_end;
1763 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1766 if (!va)
1771 va->va_start, va->va_end);
1817 static void free_vmap_area_noflush(struct vmap_area *va)
1820 unsigned long va_start = va->va_start;
1823 if (WARN_ON_ONCE(!list_empty(&va->list)))
1826 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1833 merge_or_add_vmap_area(va,
1839 /* After this point, we may free va at any time */
1847 static void free_unmap_vmap_area(struct vmap_area *va)
1849 flush_cache_vunmap(va->va_start, va->va_end);
1850 vunmap_range_noflush(va->va_start, va->va_end);
1852 flush_tlb_kernel_range(va->va_start, va->va_end);
1854 free_vmap_area_noflush(va);
1859 struct vmap_area *va;
1862 va = __find_vmap_area(addr, &vmap_area_root);
1865 return va;
1870 struct vmap_area *va;
1873 va = __find_vmap_area(addr, &vmap_area_root);
1874 if (va)
1875 unlink_va(va, &vmap_area_root);
1878 return va;
1935 struct vmap_area *va;
2026 struct vmap_area *va;
2039 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2043 if (IS_ERR(va)) {
2045 return ERR_CAST(va);
2048 vaddr = vmap_block_vaddr(va->va_start, 0);
2050 vb->va = va;
2061 xa = addr_to_vb_xa(va->va_start);
2062 vb_idx = addr_to_vb_idx(va->va_start);
2066 free_vmap_area(va);
2083 xa = addr_to_vb_xa(vb->va->va_start);
2084 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2088 unlink_va(vb->va, &vmap_area_root);
2091 free_vmap_area_noflush(vb->va);
2195 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2285 unsigned long va_start = vb->va->va_start;
2342 struct vmap_area *va;
2358 va = find_unlink_vmap_area(addr);
2359 if (WARN_ON_ONCE(!va))
2362 debug_check_no_locks_freed((void *)va->va_start,
2363 (va->va_end - va->va_start));
2364 free_unmap_vmap_area(va);
2394 struct vmap_area *va;
2395 va = alloc_vmap_area(size, PAGE_SIZE,
2398 if (IS_ERR(va))
2401 addr = va->va_start;
2542 struct vmap_area *va, unsigned long flags, const void *caller)
2545 vm->addr = (void *)va->va_start;
2546 vm->size = va->va_end - va->va_start;
2548 va->vm = vm;
2551 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2555 setup_vmalloc_vm_locked(vm, va, flags, caller);
2575 struct vmap_area *va;
2595 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2596 if (IS_ERR(va)) {
2601 setup_vmalloc_vm(area, va, flags, caller);
2665 struct vmap_area *va;
2667 va = find_vmap_area((unsigned long)addr);
2668 if (!va)
2671 return va->vm;
2686 struct vmap_area *va;
2695 va = find_unlink_vmap_area((unsigned long)addr);
2696 if (!va || !va->vm)
2698 vm = va->vm;
2705 free_unmap_vmap_area(va);
3679 start = vmap_block_vaddr(vb->va->va_start, rs);
3744 struct vmap_area *va;
3758 va = find_vmap_area_exceed_addr((unsigned long)addr);
3759 if (!va)
3763 if ((unsigned long)addr + remains <= va->va_start)
3766 list_for_each_entry_from(va, &vmap_area_list, list) {
3772 vm = va->vm;
3773 flags = va->flags & VMAP_FLAGS_MASK;
3789 vaddr = (char *) va->va_start;
3790 size = vm ? get_vm_area_size(vm) : va_size(va);
3942 * i.e. va->va_start < addr && va->va_end < addr or NULL
3948 struct vmap_area *va, *tmp;
3952 va = NULL;
3957 va = tmp;
3967 return va;
3973 * @va:
3981 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3986 if (likely(*va)) {
3987 list_for_each_entry_from_reverse((*va),
3989 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3990 if ((*va)->va_start < addr)
4028 struct vmap_area **vas, *va;
4081 va = pvm_find_va_enclose_addr(vmalloc_end);
4082 base = pvm_determine_end_from_reverse(&va, align) - end;
4095 if (va == NULL)
4102 if (base + end > va->va_end) {
4103 base = pvm_determine_end_from_reverse(&va, align) - end;
4111 if (base + start < va->va_start) {
4112 va = node_to_va(rb_prev(&va->rb_node));
4113 base = pvm_determine_end_from_reverse(&va, align) - end;
4128 va = pvm_find_va_enclose_addr(base + end);
4131 /* we've found a fitting base, insert all va's */
4138 va = pvm_find_va_enclose_addr(start);
4139 if (WARN_ON_ONCE(va == NULL))
4145 va, start, size);
4151 va = vas[area];
4152 va->va_start = start;
4153 va->va_end = start + size;
4197 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4199 if (va)
4201 va->va_start, va->va_end);
4247 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4249 if (va)
4251 va->va_start, va->va_end);
4284 struct vmap_area *va;
4290 va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
4291 if (!va) {
4296 vm = va->vm;
4361 struct vmap_area *va;
4364 list_for_each_entry(va, &purge_vmap_area_list, list) {
4366 (void *)va->va_start, (void *)va->va_end,
4367 va->va_end - va->va_start);
4374 struct vmap_area *va;
4377 va = list_entry(p, struct vmap_area, list);
4379 if (!va->vm) {
4380 if (va->flags & VMAP_RAM)
4382 (void *)va->va_start, (void *)va->va_end,
4383 va->va_end - va->va_start);
4388 v = va->vm;
4427 if (list_is_last(&va->list, &vmap_area_list))
4456 struct vmap_area *va;
4480 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4481 if (WARN_ON_ONCE(!va))
4484 va->va_start = (unsigned long)tmp->addr;
4485 va->va_end = va->va_start + tmp->size;
4486 va->vm = tmp;
4487 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);