Lines Matching refs:va
436 * All vmap_area objects in this tree are sorted by va->va_start
454 va_size(struct vmap_area *va)
456 return (va->va_end - va->va_start);
462 struct vmap_area *va;
464 va = rb_entry_safe(node, struct vmap_area, rb_node);
465 return va ? va->subtree_max_size : 0;
472 compute_subtree_max_size(struct vmap_area *va)
474 return max3(va_size(va),
475 get_subtree_max_size(va->rb_node.rb_left),
476 get_subtree_max_size(va->rb_node.rb_right));
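These fragments come from the kernel's vmalloc free-area management, and the group above shows its augmented-rbtree bookkeeping: every node in the free tree caches the largest area size found anywhere in its subtree, computed as the max of its own va_size() and the cached values of its two children. Below is a minimal userspace sketch of that recurrence; the struct layout and the names node, subtree_max and recompute are mine, not the kernel's.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for struct vmap_area: just [start, end) plus tree links. */
struct node {
    unsigned long start, end;      /* free range, end exclusive */
    unsigned long subtree_max;     /* cached max range size in this subtree */
    struct node *left, *right;
};

static unsigned long node_size(const struct node *n)
{
    return n->end - n->start;
}

static unsigned long subtree_max_of(const struct node *n)
{
    return n ? n->subtree_max : 0;
}

/* Same recurrence as compute_subtree_max_size(): own size vs. both children. */
static unsigned long recompute(struct node *n)
{
    unsigned long l = subtree_max_of(n->left);
    unsigned long r = subtree_max_of(n->right);
    unsigned long m = node_size(n);

    if (l > m) m = l;
    if (r > m) m = r;
    return n->subtree_max = m;
}

int main(void)
{
    struct node a    = { 0x1000, 0x3000, 0, NULL, NULL };  /* 8 KiB */
    struct node b    = { 0x8000, 0x9000, 0, NULL, NULL };  /* 4 KiB */
    struct node root = { 0x4000, 0x5000, 0, &a, &b };      /* 4 KiB */

    recompute(&a);
    recompute(&b);
    recompute(&root);
    printf("root subtree_max = 0x%lx\n", root.subtree_max); /* 0x2000 */
    return 0;
}

In the kernel the cached value is refreshed bottom-up along the insert and erase paths by the augmented-rbtree callbacks rather than by a full recursion, which is what the propagate call at line 693 further down is for.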
498 struct vmap_area *va;
500 va = rb_entry(n, struct vmap_area, rb_node);
501 if (addr < va->va_start)
503 else if (addr >= va->va_end)
506 return va;
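The lookup at lines 498-506 is a plain binary-tree walk over busy areas sorted by va_start: go left while the address is below the node's start, go right once it is at or past the node's end, and stop when it falls inside [va_start, va_end). A self-contained sketch of the same walk, with a trimmed-down struct area standing in for struct vmap_area:

#include <stdio.h>
#include <stddef.h>

struct area {
    unsigned long start, end;          /* [start, end) */
    struct area *left, *right;         /* BST ordered by start */
};

/* Same shape as the walk at lines 500-506: descend until addr is inside a node. */
static struct area *find_area(struct area *n, unsigned long addr)
{
    while (n) {
        if (addr < n->start)
            n = n->left;
        else if (addr >= n->end)
            n = n->right;
        else
            return n;                  /* start <= addr < end */
    }
    return NULL;
}

int main(void)
{
    struct area lo  = { 0x1000, 0x2000, NULL, NULL };
    struct area hi  = { 0x8000, 0xa000, NULL, NULL };
    struct area mid = { 0x4000, 0x6000, &lo, &hi };

    printf("%p\n", (void *)find_area(&mid, 0x9000));  /* &hi  */
    printf("%p\n", (void *)find_area(&mid, 0x3000));  /* NULL: gap between areas */
    return 0;
}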
521 find_va_links(struct vmap_area *va,
541 * it link, where the new va->rb_node will be attached to.
551 if (va->va_start < tmp_va->va_end &&
552 va->va_end <= tmp_va->va_start)
554 else if (va->va_end > tmp_va->va_start &&
555 va->va_start >= tmp_va->va_end)
559 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
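find_va_links() (lines 521-559) walks the tree to pick the rb link a new area will hang from: a range that ends at or before the current node's start goes left, one that starts at or after the current node's end goes right, and anything else overlaps and trips the report at line 559. Since va_start < va_end always holds, the two-part conditions at lines 551-555 reduce to exactly those single comparisons. A sketch of that three-way decision; the enum and function names here are mine:

#include <stdio.h>

/* Where a new [s, e) range sits relative to an existing [ts, te). */
enum place { GO_LEFT, GO_RIGHT, OVERLAP };

static enum place classify(unsigned long s, unsigned long e,
                           unsigned long ts, unsigned long te)
{
    if (e <= ts)          /* new range ends at or before the existing one starts */
        return GO_LEFT;
    if (s >= te)          /* new range starts at or after the existing one ends  */
        return GO_RIGHT;
    return OVERLAP;       /* anything else shares at least one byte */
}

int main(void)
{
    printf("%d\n", classify(0x1000, 0x2000, 0x3000, 0x4000)); /* GO_LEFT  (0) */
    printf("%d\n", classify(0x5000, 0x6000, 0x3000, 0x4000)); /* GO_RIGHT (1) */
    printf("%d\n", classify(0x3800, 0x4800, 0x3000, 0x4000)); /* OVERLAP  (2) */
    return 0;
}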
588 link_va(struct vmap_area *va, struct rb_root *root,
602 rb_link_node(&va->rb_node, parent, link);
606 * to the tree. We do not set va->subtree_max_size to
615 rb_insert_augmented(&va->rb_node,
617 va->subtree_max_size = 0;
619 rb_insert_color(&va->rb_node, root);
623 list_add(&va->list, head);
627 unlink_va(struct vmap_area *va, struct rb_root *root)
629 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
633 rb_erase_augmented(&va->rb_node,
636 rb_erase(&va->rb_node, root);
638 list_del(&va->list);
639 RB_CLEAR_NODE(&va->rb_node);
646 struct vmap_area *va;
649 list_for_each_entry(va, &free_vmap_area_list, list) {
650 computed_size = compute_subtree_max_size(va);
651 if (computed_size != va->subtree_max_size)
653 va_size(va), va->subtree_max_size);
686 augment_tree_propagate_from(struct vmap_area *va)
693 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
701 insert_vmap_area(struct vmap_area *va,
707 link = find_va_links(va, root, NULL, &parent);
709 link_va(va, root, parent, link, head);
713 insert_vmap_area_augment(struct vmap_area *va,
721 link = find_va_links(va, NULL, from, &parent);
723 link = find_va_links(va, root, NULL, &parent);
726 link_va(va, root, parent, link, head);
727 augment_tree_propagate_from(va);
743 merge_or_add_vmap_area(struct vmap_area *va,
756 link = find_va_links(va, root, NULL, &parent);
776 if (sibling->va_start == va->va_end) {
777 sibling->va_start = va->va_start;
780 kmem_cache_free(vmap_area_cachep, va);
783 va = sibling;
797 if (sibling->va_end == va->va_start) {
806 unlink_va(va, root);
808 sibling->va_end = va->va_end;
811 kmem_cache_free(vmap_area_cachep, va);
814 va = sibling;
821 link_va(va, root, parent, link, head);
826 augment_tree_propagate_from(va);
827 return va;
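merge_or_add_vmap_area() (lines 743-827) tries to coalesce a returned range with its neighbours before linking it in: if the next area begins exactly where this one ends, that sibling absorbs it (lines 776-783); if the previous area ends exactly where this one starts, it absorbs the range too, which may already have grown (lines 797-814); only when neither edge touches is the range inserted on its own. A sketch of the same coalescing over a sorted array of free ranges, with all names hypothetical:

#include <stdio.h>

struct range { unsigned long start, end; };   /* [start, end), kept sorted */

/*
 * Insert a freed range into a sorted array, merging with any neighbour it
 * touches, the same idea as merge_or_add_vmap_area() minus the rbtree.
 * Returns the new element count.
 */
static int add_free_range(struct range *r, int n, unsigned long s, unsigned long e)
{
    int i = 0;

    while (i < n && r[i].start < s)            /* find insertion point */
        i++;

    int touch_prev = (i > 0 && r[i - 1].end == s);
    int touch_next = (i < n && r[i].start == e);

    if (touch_prev && touch_next) {            /* bridges a gap: collapse to one */
        r[i - 1].end = r[i].end;
        for (int j = i; j < n - 1; j++)
            r[j] = r[j + 1];
        return n - 1;
    }
    if (touch_prev) { r[i - 1].end = e; return n; }
    if (touch_next) { r[i].start = s;  return n; }

    for (int j = n; j > i; j--)                /* no merge: plain insert */
        r[j] = r[j - 1];
    r[i].start = s;
    r[i].end = e;
    return n + 1;
}

int main(void)
{
    struct range r[8] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
    int n = 2;

    n = add_free_range(r, n, 0x2000, 0x3000); /* touches both neighbours */
    printf("n=%d first=[0x%lx, 0x%lx)\n", n, r[0].start, r[0].end);
    return 0;
}

Keeping free areas maximally merged is what lets the subtree_max_size search find one large block instead of several adjacent small ones.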
831 is_within_this_va(struct vmap_area *va, unsigned long size,
836 if (va->va_start > vstart)
837 nva_start_addr = ALIGN(va->va_start, align);
846 return (nva_start_addr + size <= va->va_end);
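is_within_this_va() (lines 831-846) asks whether size bytes at a given alignment, starting no lower than vstart, can be carved out of one free area: the candidate start is the larger of the area's start and vstart rounded up to the alignment, and the test is whether candidate + size still ends inside the area. The rounding and bounds check in isolation, with ALIGN() spelled out and power-of-two alignment assumed, as in the original:

#include <stdio.h>
#include <stdbool.h>

/* Round x up to a power-of-two alignment, like the kernel's ALIGN(). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);
}

/*
 * Can size bytes at the given alignment, starting no lower than vstart,
 * be placed inside the free range [start, end)?
 */
static bool fits(unsigned long start, unsigned long end,
                 unsigned long size, unsigned long align, unsigned long vstart)
{
    unsigned long cand = align_up(start > vstart ? start : vstart, align);

    if (cand + size < cand || cand > end)   /* overflow, or start past the area */
        return false;
    return cand + size <= end;
}

int main(void)
{
    /* 0x1100..0x3000 free: a 4 KiB, 4 KiB-aligned request lands at 0x2000. */
    printf("%d\n", fits(0x1100, 0x3000, 0x1000, 0x1000, 0));      /* 1 */
    /* The same request does not fit in 0x1100..0x2800. */
    printf("%d\n", fits(0x1100, 0x2800, 0x1000, 0x1000, 0));      /* 0 */
    return 0;
}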
858 struct vmap_area *va;
869 va = rb_entry(node, struct vmap_area, rb_node);
872 vstart < va->va_start) {
875 if (is_within_this_va(va, size, align, vstart))
876 return va;
894 va = rb_entry(node, struct vmap_area, rb_node);
895 if (is_within_this_va(va, size, align, vstart))
896 return va;
899 vstart <= va->va_start) {
917 struct vmap_area *va;
919 list_for_each_entry(va, &free_vmap_area_list, list) {
920 if (!is_within_this_va(va, size, align, vstart))
923 return va;
957 classify_va_fit_type(struct vmap_area *va,
963 if (nva_start_addr < va->va_start ||
964 nva_start_addr + size > va->va_end)
968 if (va->va_start == nva_start_addr) {
969 if (va->va_end == nva_start_addr + size)
973 } else if (va->va_end == nva_start_addr + size) {
983 adjust_va_to_fit_type(struct vmap_area *va,
997 unlink_va(va, &free_vmap_area_root);
998 kmem_cache_free(vmap_area_cachep, va);
1007 va->va_start += size;
1016 va->va_end = nva_start_addr;
1060 lva->va_start = va->va_start;
1066 va->va_start = nva_start_addr + size;
1072 augment_tree_propagate_from(va);
1075 insert_vmap_area_augment(lva, &va->rb_node,
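classify_va_fit_type() and adjust_va_to_fit_type() (lines 957-1075) carve the chosen block out of a free area in one of four ways: the block covers the whole area and the area is removed (997-998); it sits on the left edge and only the start moves up (1007); on the right edge and only the end moves down (1016); or strictly inside, in which case the area is split and the left remainder lva is inserted back with the augmented data refreshed (1060-1075). A sketch of that classification and carving; the enum names are my own, the kernel defines its own fit-type constants:

#include <stdio.h>

struct range { unsigned long start, end; };    /* a free area, [start, end) */

enum fit { NO_FIT, FULL_FIT, LEFT_EDGE, RIGHT_EDGE, MIDDLE };

/* Where does the allocation [s, s + size) sit inside the free range? */
static enum fit classify(const struct range *r, unsigned long s, unsigned long size)
{
    unsigned long e = s + size;

    if (s < r->start || e > r->end)
        return NO_FIT;
    if (s == r->start)
        return e == r->end ? FULL_FIT : LEFT_EDGE;
    if (e == r->end)
        return RIGHT_EDGE;
    return MIDDLE;
}

/*
 * Carve the allocation out of *r.  On MIDDLE the left remainder is written
 * to *left and *r keeps the right remainder; on FULL_FIT *r becomes empty.
 */
static enum fit carve(struct range *r, unsigned long s, unsigned long size,
                      struct range *left)
{
    enum fit t = classify(r, s, size);

    switch (t) {
    case FULL_FIT:
        r->start = r->end;        /* area fully consumed (the kernel frees the node) */
        break;
    case LEFT_EDGE:
        r->start = s + size;      /* trim from below */
        break;
    case RIGHT_EDGE:
        r->end = s;               /* trim from above */
        break;
    case MIDDLE:
        left->start = r->start;   /* split: left remainder goes to *left */
        left->end = s;
        r->start = s + size;      /* *r keeps the right remainder */
        break;
    case NO_FIT:
        break;
    }
    return t;
}

int main(void)
{
    struct range r = { 0x1000, 0x8000 }, left;

    carve(&r, 0x3000, 0x1000, &left);  /* MIDDLE: [0x1000,0x3000) + [0x4000,0x8000) */
    printf("left=[0x%lx, 0x%lx) right=[0x%lx, 0x%lx)\n",
           left.start, left.end, r.start, r.end);
    return 0;
}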
1091 struct vmap_area *va;
1095 va = find_vmap_lowest_match(size, align, vstart);
1096 if (unlikely(!va))
1099 if (va->va_start > vstart)
1100 nva_start_addr = ALIGN(va->va_start, align);
1109 type = classify_va_fit_type(va, nva_start_addr, size);
1114 ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1128 static void free_vmap_area(struct vmap_area *va)
1134 unlink_va(va, &vmap_area_root);
1141 merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
1154 struct vmap_area *va, *pva;
1169 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1170 if (unlikely(!va))
1177 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1220 va->va_start = addr;
1221 va->va_end = addr + size;
1222 va->vm = NULL;
1226 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1229 BUG_ON(!IS_ALIGNED(va->va_start, align));
1230 BUG_ON(va->va_start < vstart);
1231 BUG_ON(va->va_end > vend);
1235 free_vmap_area(va);
1239 return va;
1261 kmem_cache_free(vmap_area_cachep, va);
1330 struct vmap_area *va;
1343 llist_for_each_entry(va, valist, purge_list) {
1344 if (va->va_start < start)
1345 start = va->va_start;
1346 if (va->va_end > end)
1347 end = va->va_end;
1354 llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1355 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1356 unsigned long orig_start = va->va_start;
1357 unsigned long orig_end = va->va_end;
1364 va = merge_or_add_vmap_area(va, &free_vmap_area_root,
1367 if (!va)
1372 va->va_start, va->va_end);
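__purge_vmap_area_lazy() (lines 1330-1372) drains the deferred frees in two passes: the first walk only widens a [start, end] window so that it covers every queued range and one TLB flush can serve the whole batch (1344-1347); the second walk returns each area to the free tree through merge_or_add_vmap_area() (1364). A sketch of the window-widening pass; the struct and function names are illustrative:

#include <stdio.h>

struct pending { unsigned long start, end; };   /* one deferred [start, end) */

/*
 * Compute one covering [lo, hi) interval for a batch of deferred frees,
 * the way the first loop in __purge_vmap_area_lazy() widens start/end.
 */
static void covering_range(const struct pending *p, int n,
                           unsigned long *lo, unsigned long *hi)
{
    *lo = ~0UL;
    *hi = 0;
    for (int i = 0; i < n; i++) {
        if (p[i].start < *lo) *lo = p[i].start;
        if (p[i].end   > *hi) *hi = p[i].end;
    }
}

int main(void)
{
    struct pending batch[] = {
        { 0x9000, 0xa000 }, { 0x2000, 0x4000 }, { 0x6000, 0x7000 },
    };
    unsigned long lo, hi;

    covering_range(batch, 3, &lo, &hi);
    printf("flush [0x%lx, 0x%lx)\n", lo, hi);   /* flush [0x2000, 0xa000) */
    return 0;
}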
1411 static void free_vmap_area_noflush(struct vmap_area *va)
1416 unlink_va(va, &vmap_area_root);
1419 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1422 /* After this point, we may free va at any time */
1423 llist_add(&va->purge_list, &vmap_purge_list);
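free_vmap_area_noflush() (lines 1411-1423) defers the expensive part of a free: the area is unlinked from the busy tree, the count of lazily freed pages is bumped atomically (1419), and the area is pushed onto a lock-free purge list (1423); the purge pass above then flushes and merges a whole batch at once. A sketch of that accounting pattern using C11 atomics; the threshold constant here is made up, the kernel computes its own limit:

#include <stdio.h>
#include <stdatomic.h>

#define PAGE_SHIFT      12
#define LAZY_MAX_PAGES  32          /* illustrative threshold, not the kernel's */

static atomic_ulong lazy_pages;     /* pages queued but not yet purged */

/* Queue a freed [start, end) range; return 1 if a purge should be kicked off. */
static int lazy_free(unsigned long start, unsigned long end)
{
    unsigned long nr = (end - start) >> PAGE_SHIFT;
    unsigned long total = atomic_fetch_add(&lazy_pages, nr) + nr;

    /* (the real code also queues the area itself on a purge list here) */
    return total > LAZY_MAX_PAGES;
}

int main(void)
{
    printf("%d\n", lazy_free(0x100000, 0x110000));  /* 16 pages queued: 0 */
    printf("%d\n", lazy_free(0x200000, 0x218000));  /* 40 pages total:  1 */
    return 0;
}

Batching is the point of the lazy path: one flush over the covering interval instead of one TLB flush per freed mapping.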
1432 static void free_unmap_vmap_area(struct vmap_area *va)
1434 flush_cache_vunmap(va->va_start, va->va_end);
1435 unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
1437 flush_tlb_kernel_range(va->va_start, va->va_end);
1439 free_vmap_area_noflush(va);
1444 struct vmap_area *va;
1447 va = __find_vmap_area(addr);
1450 return va;
1490 struct vmap_area *va;
1543 struct vmap_area *va;
1555 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1558 if (IS_ERR(va)) {
1560 return ERR_CAST(va);
1563 vaddr = vmap_block_vaddr(va->va_start, 0);
1565 vb->va = va;
1574 vb_idx = addr_to_vb_idx(va->va_start);
1578 free_vmap_area(va);
1595 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1598 free_vmap_area_noflush(vb->va);
1676 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1750 unsigned long va_start = vb->va->va_start;
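The vmap-block lines (1555-1750) key each block by an index derived from its base address (the xa_erase() at 1595 uses the same index) and turn a block base plus a page offset back into a usable virtual address (1563, 1676). A sketch of that index arithmetic, assuming naturally aligned VMAP_BLOCK_SIZE blocks; the real helpers also bias by the start of the vmalloc range, and the block size used here is purely illustrative:

#include <stdio.h>

#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PAGES_PER_BLOCK  64                            /* illustrative */
#define VMAP_BLOCK_SIZE  (PAGES_PER_BLOCK * PAGE_SIZE)

/* Block index for an address: which VMAP_BLOCK_SIZE slot it falls into. */
static unsigned long addr_to_idx(unsigned long addr)
{
    return addr / VMAP_BLOCK_SIZE;
}

/* Virtual address of page pages_off inside the block that starts at va_start. */
static void *block_vaddr(unsigned long va_start, unsigned long pages_off)
{
    return (void *)(va_start + (pages_off << PAGE_SHIFT));
}

int main(void)
{
    unsigned long va_start = 64 * VMAP_BLOCK_SIZE;     /* pretend block base */

    printf("idx=%lu\n", addr_to_idx(va_start));        /* 64 */
    printf("vaddr=%p\n", block_vaddr(va_start, 3));    /* base + 3 pages */
    return 0;
}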
1804 struct vmap_area *va;
1820 va = find_vmap_area(addr);
1821 BUG_ON(!va);
1822 debug_check_no_locks_freed((void *)va->va_start,
1823 (va->va_end - va->va_start));
1824 free_unmap_vmap_area(va);
1854 struct vmap_area *va;
1855 va = alloc_vmap_area(size, PAGE_SIZE,
1857 if (IS_ERR(va))
1860 addr = va->va_start;
1970 struct vmap_area *va;
1993 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1994 if (WARN_ON_ONCE(!va))
1997 va->va_start = (unsigned long)tmp->addr;
1998 va->va_end = va->va_start + tmp->size;
1999 va->vm = tmp;
2000 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2028 struct vmap_area *va, unsigned long flags, const void *caller)
2031 vm->addr = (void *)va->va_start;
2032 vm->size = va->va_end - va->va_start;
2034 va->vm = vm;
2037 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2041 setup_vmalloc_vm_locked(vm, va, flags, caller);
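setup_vmalloc_vm() (lines 2028-2041) is the glue between the two objects: the vm_struct takes its address and size from the vmap_area (2031-2032), and the vmap_area gets a back-pointer (2034) so an address lookup such as find_vm_area() at line 2144 below can hand back the vm_struct. A sketch of that pairing with trimmed-down stand-ins for both structures:

#include <stdio.h>
#include <stddef.h>

struct vm_struct {                 /* trimmed-down stand-in, not the kernel layout */
    void *addr;
    unsigned long size;
    unsigned long flags;
};

struct vmap_area {                 /* likewise trimmed down */
    unsigned long va_start, va_end;
    struct vm_struct *vm;          /* back-pointer set up below */
};

/* Mirror of the assignments at lines 2031-2034: vm describes va's range. */
static void setup_vm(struct vm_struct *vm, struct vmap_area *va,
                     unsigned long flags)
{
    vm->addr = (void *)va->va_start;
    vm->size = va->va_end - va->va_start;
    vm->flags = flags;
    va->vm = vm;
}

int main(void)
{
    struct vmap_area va = { 0x100000, 0x104000, NULL };
    struct vm_struct vm;

    setup_vm(&vm, &va, 0);
    printf("addr=%p size=%lu\n", vm.addr, vm.size);   /* size = 16384 */
    return 0;
}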
2060 struct vmap_area *va;
2080 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2081 if (IS_ERR(va)) {
2086 kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2088 setup_vmalloc_vm(area, va, flags, caller);
2138 struct vmap_area *va;
2140 va = find_vmap_area((unsigned long)addr);
2141 if (!va)
2144 return va->vm;
2159 struct vmap_area *va;
2164 va = __find_vmap_area((unsigned long)addr);
2165 if (va && va->vm) {
2166 struct vm_struct *vm = va->vm;
2168 va->vm = NULL;
2172 free_unmap_vmap_area(va);
2876 struct vmap_area *va;
2887 list_for_each_entry(va, &vmap_area_list, list) {
2891 if (!va->vm)
2894 vm = va->vm;
2955 struct vmap_area *va;
2967 list_for_each_entry(va, &vmap_area_list, list) {
2971 if (!va->vm)
2974 vm = va->vm;
3111 * i.e. va->va_start < addr && va->va_end < addr or NULL
3117 struct vmap_area *va, *tmp;
3121 va = NULL;
3126 va = tmp;
3136 return va;
3142 * @va:
3149 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3154 if (likely(*va)) {
3155 list_for_each_entry_from_reverse((*va),
3157 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3158 if ((*va)->va_start < addr)
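The expression at line 3157, va_end & ~(align - 1), rounds an area's end down to the requested alignment (assumed to be a power of two), and the reverse walk keeps stepping to lower areas until one actually starts below that rounded-down address (3158). The masking trick on its own:

#include <stdio.h>

/* Round x down to a power-of-two alignment, as at line 3157. */
static unsigned long align_down(unsigned long x, unsigned long align)
{
    return x & ~(align - 1);
}

int main(void)
{
    /* A free area ending at 0x5c40, asked about with 4 KiB alignment. */
    printf("0x%lx\n", align_down(0x5c40, 0x1000));   /* 0x5000 */
    /* Already aligned values are unchanged. */
    printf("0x%lx\n", align_down(0x8000, 0x1000));   /* 0x8000 */
    return 0;
}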
3196 struct vmap_area **vas, *va;
3250 va = pvm_find_va_enclose_addr(vmalloc_end);
3251 base = pvm_determine_end_from_reverse(&va, align) - end;
3264 if (va == NULL)
3271 if (base + end > va->va_end) {
3272 base = pvm_determine_end_from_reverse(&va, align) - end;
3280 if (base + start < va->va_start) {
3281 va = node_to_va(rb_prev(&va->rb_node));
3282 base = pvm_determine_end_from_reverse(&va, align) - end;
3297 va = pvm_find_va_enclose_addr(base + end);
3300 /* we've found a fitting base, insert all va's */
3307 va = pvm_find_va_enclose_addr(start);
3308 if (WARN_ON_ONCE(va == NULL))
3312 type = classify_va_fit_type(va, start, size);
3317 ret = adjust_va_to_fit_type(va, start, size, type);
3322 va = vas[area];
3323 va->va_start = start;
3324 va->va_end = start + size;
3361 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3363 if (va)
3365 va->va_start, va->va_end);
3411 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3413 if (va)
3415 va->va_start, va->va_end);
3493 struct vmap_area *va;
3499 llist_for_each_entry(va, head, purge_list) {
3501 (void *)va->va_start, (void *)va->va_end,
3502 va->va_end - va->va_start);
3508 struct vmap_area *va;
3511 va = list_entry(p, struct vmap_area, list);
3517 if (!va->vm) {
3519 (void *)va->va_start, (void *)va->va_end,
3520 va->va_end - va->va_start);
3525 v = va->vm;
3566 if (list_is_last(&va->list, &vmap_area_list))