Lines matching refs:resv (struct resv_map users in mm/hugetlb.c)
437 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
441 VM_BUG_ON(resv->region_cache_count <= 0);
443 resv->region_cache_count--;
444 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
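The fragment at 437-444 is the body of get_file_region_entry_from_cache(): under resv->lock it pops a preallocated struct file_region off resv->region_cache rather than calling an allocator, since a spinlock forbids sleeping allocations. A minimal userspace sketch of the same pop-from-cache pattern (the singly linked free list and the name region_cache_pop are inventions of this sketch, not the kernel's list_head API):

    #include <assert.h>

    struct file_region { long from, to; struct file_region *next; };

    struct resv_map {
        struct file_region *region_cache;   /* preallocated entries */
        long region_cache_count;
    };

    /* Pop one preallocated entry; the caller must hold the map lock. */
    static struct file_region *region_cache_pop(struct resv_map *resv)
    {
        assert(resv->region_cache_count > 0);   /* models the VM_BUG_ON() */
        struct file_region *nrg = resv->region_cache;

        resv->region_cache = nrg->next;
        resv->region_cache_count--;
        return nrg;
    }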
467 struct resv_map *resv,
486 if (!resv->pages_per_hpage)
487 resv->pages_per_hpage = pages_per_huge_page(h);
491 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
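Lines 486-491 (in the kernel this is record_hugetlb_cgroup_uncharge_info(), which supports cgroup uncharging) lazily record the huge page size on first use and then assert it never changes, because one resv_map only ever serves a single hstate. A sketch of that lazy-record-then-check idiom (record_pages_per_hpage is a made-up name):

    #include <assert.h>

    struct resv_map { long pages_per_hpage; };   /* 0 until first use */

    /* The first caller records the huge page size; every later caller
     * must pass the same value, since one map serves one hstate. */
    static void record_pages_per_hpage(struct resv_map *resv, long pages)
    {
        if (!resv->pages_per_hpage)
            resv->pages_per_hpage = pages;
        assert(resv->pages_per_hpage == pages);  /* models the VM_BUG_ON() */
    }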
519 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
524 if (&prg->link != &resv->regions && prg->to == rg->from &&
536 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
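coalesce_file_region() at 519-536 keeps the regions list minimal: after an insert it merges the new entry with its predecessor and successor whenever the ranges abut and (in the kernel) the cgroup charge info matches. A doubly linked userspace sketch of the merge, with the cgroup check elided:

    #include <stdlib.h>

    struct file_region { long from, to; struct file_region *prev, *next; };

    /* Merge rg with abutting neighbors.  The kernel additionally
     * requires matching cgroup charge info before merging. */
    static void coalesce_region(struct file_region *rg)
    {
        struct file_region *prg = rg->prev;

        if (prg && prg->to == rg->from) {        /* absorb rg into prev */
            prg->to = rg->to;
            prg->next = rg->next;
            if (rg->next)
                rg->next->prev = prg;
            free(rg);
            rg = prg;
        }

        struct file_region *nrg = rg->next;
        if (nrg && rg->to == nrg->from) {        /* absorb next into rg */
            rg->to = nrg->to;
            rg->next = nrg->next;
            if (nrg->next)
                nrg->next->prev = rg;
            free(nrg);
        }
    }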
565 * Must be called with resv->lock held.
572 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
577 struct list_head *head = &resv->regions;
612 add += hugetlb_resv_map_add(resv, iter->link.prev,
626 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
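add_reservation_in_range() at 572-626 walks the sorted, non-overlapping regions list and accounts every gap inside [f, t); depending on its arguments it either inserts file_region entries for those gaps or merely counts how many entries an insert would need. A sketch of the counting pass (uncovered_in_range is an invented name; the kernel threads last_accounted_offset through the same loop shape):

    struct file_region { long from, to; struct file_region *next; };

    /* Pages in [f, t) not covered by the sorted region list; the kernel
     * version can also insert an entry for each gap it finds. */
    static long uncovered_in_range(const struct file_region *head,
                                   long f, long t)
    {
        long add = 0, last = f;          /* last_accounted_offset */

        for (const struct file_region *rg = head; rg; rg = rg->next) {
            if (rg->from >= t)           /* past our window: done */
                break;
            if (rg->to <= last)          /* entirely behind us: skip */
                continue;
            if (rg->from > last)         /* gap before this region */
                add += rg->from - last;
            last = rg->to;
        }
        if (last < t)                    /* trailing gap */
            add += t - last;
        return add;
    }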
632 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
634 static int allocate_file_region_entries(struct resv_map *resv,
636 __must_hold(&resv->lock)
653 while (resv->region_cache_count <
654 (resv->adds_in_progress + regions_needed)) {
655 to_allocate = resv->adds_in_progress + regions_needed -
656 resv->region_cache_count;
662 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
664 spin_unlock(&resv->lock);
672 spin_lock(&resv->lock);
674 list_splice(&allocated_regions, &resv->region_cache);
675 resv->region_cache_count += to_allocate;
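allocate_file_region_entries() at 634-675 is the classic spinlock-versus-sleeping-allocator dance: while the cache cannot cover adds_in_progress plus the new request, drop the lock, allocate the shortfall, retake the lock, and splice the batch in. The loop condition is rechecked every pass because other threads may have consumed or donated entries while the lock was down. A pthread-based userspace sketch of the same pattern (top_up_cache is an invented name):

    #include <pthread.h>
    #include <stdlib.h>

    struct file_region { long from, to; struct file_region *next; };

    struct resv_map {
        pthread_mutex_t lock;
        struct file_region *region_cache;
        long region_cache_count;
        long adds_in_progress;
    };

    /* Keep enough cached entries for every in-flight add plus this
     * request.  Allocation may sleep, so the lock is dropped around it
     * and the shortfall recomputed each pass. */
    static int top_up_cache(struct resv_map *resv, long regions_needed)
    {
        struct file_region *batch = NULL;

        while (resv->region_cache_count <
               resv->adds_in_progress + regions_needed) {
            long to_allocate = resv->adds_in_progress + regions_needed -
                               resv->region_cache_count;

            pthread_mutex_unlock(&resv->lock);
            for (long i = 0; i < to_allocate; i++) {
                struct file_region *rg = malloc(sizeof(*rg));

                if (!rg)
                    goto out_of_memory;
                rg->next = batch;
                batch = rg;
            }
            pthread_mutex_lock(&resv->lock);

            while (batch) {                 /* splice batch into cache */
                struct file_region *next = batch->next;

                batch->next = resv->region_cache;
                resv->region_cache = batch;
                resv->region_cache_count++;
                batch = next;
            }
        }
        return 0;

    out_of_memory:
        while (batch) {                     /* free the partial batch */
            struct file_region *next = batch->next;

            free(batch);
            batch = next;
        }
        pthread_mutex_lock(&resv->lock);    /* preserve locking contract */
        return -1;
    }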
705 static long region_add(struct resv_map *resv, long f, long t,
711 spin_lock(&resv->lock);
715 add_reservation_in_range(resv, f, t, NULL, NULL,
728 resv->region_cache_count <
729 resv->adds_in_progress +
737 resv, actual_regions_needed - in_regions_needed)) {
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
746 resv->adds_in_progress -= in_regions_needed;
748 spin_unlock(&resv->lock);
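region_add() at 705-748 is the commit half of the two-phase protocol. Because the map may have changed since the matching region_chg(), it first re-runs the counting pass; if the real need exceeds what was promised and the cache cannot absorb the difference, it allocates more entries and retries, and only then inserts the regions and retires the promise by decrementing adds_in_progress. A toy sketch of that control flow (toy_map, entries_needed_now and grow_cache are stand-ins invented here, not kernel interfaces):

    #include <pthread.h>

    struct toy_map {
        pthread_mutex_t lock;
        long cache;               /* preallocated entries on hand */
        long adds_in_progress;    /* entries promised to pending adds */
    };

    /* Stand-ins for the real list walk and allocator. */
    static long entries_needed_now(struct toy_map *m) { (void)m; return 1; }
    static int grow_cache(struct toy_map *m, long n) { m->cache += n; return 0; }

    /* region_add-style commit: recount, top up if the earlier estimate
     * was low, then insert for real and retire the chg's promise. */
    static long commit_add(struct toy_map *m, long promised)
    {
        long actual;

        pthread_mutex_lock(&m->lock);
    retry:
        actual = entries_needed_now(m);
        if (actual > promised &&
            m->cache < m->adds_in_progress + (actual - promised)) {
            if (grow_cache(m, actual - promised) < 0) {
                pthread_mutex_unlock(&m->lock);
                return -1;
            }
            goto retry;   /* in the kernel, growing drops the lock */
        }
        /* ... consume cache entries and insert the regions here ... */
        m->adds_in_progress -= promised;
        pthread_mutex_unlock(&m->lock);
        return 0;
    }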
764 * resv->adds_in_progress. This value needs to be provided to a follow-up call
772 static long region_chg(struct resv_map *resv, long f, long t,
777 spin_lock(&resv->lock);
780 chg = add_reservation_in_range(resv, f, t, NULL, NULL,
786 if (allocate_file_region_entries(resv, *out_regions_needed))
789 resv->adds_in_progress += *out_regions_needed;
791 spin_unlock(&resv->lock);
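region_chg() at 772-791 is the prepare half: a dry-run counting pass computes the charge, allocate_file_region_entries() tops the cache up, and adds_in_progress records the promise so concurrent callers cannot starve the cache; the region_abort() fragment just below (808-814) simply revokes that promise. A toy sketch continuing the types from the region_add sketch above (note the real function returns pages to charge and reports the entry count separately through *out_regions_needed; this toy conflates the two):

    /* region_chg-style prepare: dry run, preallocate, record promise. */
    static long prepare_add(struct toy_map *m, long *out_promised)
    {
        pthread_mutex_lock(&m->lock);
        long chg = entries_needed_now(m);       /* dry run, no insert */

        *out_promised = chg;
        if (grow_cache(m, *out_promised) < 0) {
            pthread_mutex_unlock(&m->lock);
            return -1;
        }
        m->adds_in_progress += *out_promised;
        pthread_mutex_unlock(&m->lock);
        return chg;
    }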
808 static void region_abort(struct resv_map *resv, long f, long t,
811 spin_lock(&resv->lock);
812 VM_BUG_ON(!resv->region_cache_count);
813 resv->adds_in_progress -= regions_needed;
814 spin_unlock(&resv->lock);
831 static long region_del(struct resv_map *resv, long f, long t)
833 struct list_head *head = &resv->regions;
839 spin_lock(&resv->lock);
860 resv->region_cache_count > resv->adds_in_progress) {
861 nrg = list_first_entry(&resv->region_cache,
865 resv->region_cache_count--;
869 spin_unlock(&resv->lock);
878 resv, rg, t - f, false);
898 hugetlb_cgroup_uncharge_file_region(resv, rg,
906 hugetlb_cgroup_uncharge_file_region(resv, rg,
912 hugetlb_cgroup_uncharge_file_region(resv, rg,
920 spin_unlock(&resv->lock);
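region_del() at 831-920 removes [f, t) from the map. The interesting case is a hole punched in the interior of one region, which splits it in two and therefore consumes a spare file_region; when no spare is available beyond what in-progress adds have reserved, the kernel drops resv->lock, allocates one, and retries (the plumbing around 860-869 above). A sketch of the split/trim arithmetic, taking the spare entry as a parameter:

    struct file_region { long from, to; struct file_region *next; };

    /* Punch [f, t) out of one overlapping region.  An interior hole
     * splits the region and consumes the spare entry; the other cases
     * just trim.  Returns the number of pages removed. */
    static long region_punch(struct file_region *rg, long f, long t,
                             struct file_region *spare)
    {
        if (f > rg->from && t < rg->to) {     /* interior: split in two */
            spare->from = t;
            spare->to = rg->to;
            spare->next = rg->next;
            rg->to = f;
            rg->next = spare;
            return t - f;
        }
        if (f <= rg->from && t >= rg->to) {   /* fully covered */
            long del = rg->to - rg->from;
            rg->from = rg->to;                /* caller unlinks empty rg */
            return del;
        }
        if (f <= rg->from) {                  /* trim the front */
            long del = t - rg->from;
            rg->from = t;
            return del;
        }
        long del = rg->to - f;                /* trim the tail */
        rg->to = f;
        return del;
    }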
958 static long region_count(struct resv_map *resv, long f, long t)
960 struct list_head *head = &resv->regions;
964 spin_lock(&resv->lock);
980 spin_unlock(&resv->lock);
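region_count() at 958-980 answers the inverse question: how many pages of [f, t) are already covered by the map. Each overlapping region is clipped to the query window, i.e. max(rg->from, f) up to min(rg->to, t). A sketch:

    struct file_region { long from, to; struct file_region *next; };

    /* Pages of [f, t) covered by the sorted, non-overlapping list. */
    static long count_covered(const struct file_region *head, long f, long t)
    {
        long chg = 0;

        for (const struct file_region *rg = head; rg; rg = rg->next) {
            if (rg->to <= f)
                continue;
            if (rg->from >= t)
                break;
            long seg_from = rg->from > f ? rg->from : f;   /* max() */
            long seg_to = rg->to < t ? rg->to : t;         /* min() */

            chg += seg_to - seg_from;
        }
        return chg;
    }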
2718 struct resv_map *resv;
2723 resv = vma_resv_map(vma);
2724 if (!resv)
2730 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2732 * 1 page, and that adding a 1 page entry to the resv map can only
2738 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2743 region_abort(resv, idx, idx + 1, 1);
2748 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2752 region_abort(resv, idx, idx + 1, 1);
2753 ret = region_del(resv, idx, idx + 1);
2758 region_abort(resv, idx, idx + 1, 1);
2759 ret = region_del(resv, idx, idx + 1);
2761 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
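Lines 2718-2761 are the consumer of the whole two-phase API (in the kernel this caller is the __vma_reservation_common() family): reserve exactly one huge page at index idx with region_chg(), then either commit with region_add() or unwind with region_abort(), plus region_del() on error paths where a committed entry must be removed again. A compilable toy of the calling protocol, with stub bodies standing in for the real functions listed above:

    /* Stubs so the protocol compiles on its own; the real bodies are
     * the mm/hugetlb.c functions shown earlier in this listing. */
    struct resv_map;
    static long toy_chg(struct resv_map *r, long f, long t, long *needed)
    { (void)r; (void)f; (void)t; *needed = 1; return 1; }
    static long toy_add(struct resv_map *r, long f, long t, long needed)
    { (void)r; (void)f; (void)t; (void)needed; return 0; }
    static void toy_abort(struct resv_map *r, long f, long t, long needed)
    { (void)r; (void)f; (void)t; (void)needed; }

    /* Reserve one huge page at idx, mirroring the chg -> add / abort
     * flow of the 2718-2761 caller. */
    static long reserve_page(struct resv_map *resv, long idx)
    {
        long needed;
        long chg = toy_chg(resv, idx, idx + 1, &needed);  /* may allocate */

        if (chg < 0)
            return chg;
        if (0 /* e.g. the subpool or cgroup charge failed */) {
            toy_abort(resv, idx, idx + 1, needed);        /* undo promise */
            return -1;
        }
        return toy_add(resv, idx, idx + 1, needed);       /* commit */
    }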
4840 struct resv_map *resv = vma_resv_map(vma);
4851 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4852 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4853 kref_get(&resv->refs);
4879 struct resv_map *resv;
4886 resv = vma_resv_map(vma);
4887 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4893 reserve = (end - start) - region_count(resv, start, end);
4894 hugetlb_cgroup_uncharge_counter(resv, start, end);
4904 kref_put(&resv->refs, resv_map_release);
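The last two fragments are the VMA open/close hooks (hugetlb_vm_op_open()/hugetlb_vm_op_close()). On open (4840-4853) a VMA that owns the reservation map duplicates the cgroup uncharge info and pins the map with kref_get(); on close (4879-4904) the reservations never consumed by a fault, (end - start) - region_count(resv, start, end), are uncharged, then kref_put() drops the map, freeing it via resv_map_release() when the last owner goes away. A non-atomic toy of that lifetime pattern (the real code uses struct kref; names here are invented):

    #include <stdlib.h>

    struct resv_map { long refs; /* kernel: an atomic struct kref */ };

    static void resv_map_release_sketch(struct resv_map *resv) { free(resv); }

    /* vm_op open: every duplicated owner VMA pins the map. */
    static void resv_get(struct resv_map *resv) { resv->refs++; }

    /* vm_op close: give back reservations never consumed by a fault,
     * then unpin; the last put frees the map. */
    static void resv_put(struct resv_map *resv, long start, long end,
                         long pages_consumed)
    {
        long unused = (end - start) - pages_consumed;

        /* ... uncharge `unused` pages from the cgroup / subpool ... */
        (void)unused;
        if (--resv->refs == 0)
            resv_map_release_sketch(resv);
    }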