Lines matching defs: add
569 * indicate the number of file_regions needed in the cache to carry out adding
576 long add = 0;
612 add += hugetlb_resv_map_add(resv, iter->link.prev,
626 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
629 return add;
646 * the number of in progress add operations plus regions_needed.
709 long add = 0, actual_regions_needed = 0;
714 /* Count how many regions are actually needed to execute this add. */
720 * this add operation. Note that actual_regions_needed may be greater
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
749 return add;
757 * map to add the specified range [f, t). region_chg does
796 * Abort the in progress add operation. The adds_in_progress field
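
The matches above from source lines 569 through 796 all sit in the reserve-map helpers: add_reservation_in_range() walks the map and calls hugetlb_resv_map_add() for each uncovered gap, region_chg() estimates how many file_regions a later add will need, and region_add() performs the add and returns how many pages were actually inserted. Below is a minimal user-space sketch of that pattern only; the structs and the helpers region_missing() and region_insert() are simplified stand-ins for the kernel code, which additionally manages a pre-allocated file_region cache, the adds_in_progress counter, locking and cgroup charging.

    /* Illustrative sketch only, not the kernel implementation. */
    #include <stdio.h>
    #include <stdlib.h>

    struct region {             /* stand-in for struct file_region */
        long from, to;          /* covers pages [from, to) */
        struct region *next;
    };

    struct resv_map {           /* sorted, non-overlapping region list */
        struct region *head;
    };

    /* Count the pages in [f, t) not yet covered by the map: roughly what
     * the region_chg() step reports before the real add happens. */
    static long region_missing(const struct resv_map *resv, long f, long t)
    {
        const struct region *rg;
        long missing = 0, pos = f;

        for (rg = resv->head; rg && rg->from < t; rg = rg->next) {
            if (rg->to <= pos)
                continue;
            if (rg->from > pos)
                missing += rg->from - pos;      /* gap before rg */
            pos = rg->to;
        }
        if (pos < t)
            missing += t - pos;                 /* trailing gap */
        return missing;
    }

    /* Insert regions for the uncovered parts of [f, t), keeping the list
     * sorted, and return how many pages were actually added ('add'). */
    static long region_insert(struct resv_map *resv, long f, long t)
    {
        struct region **link = &resv->head;
        long pos = f, added = 0;

        while (*link && (*link)->to <= pos)     /* skip regions before f */
            link = &(*link)->next;

        while (pos < t) {
            long end = (*link && (*link)->from < t) ? (*link)->from : t;

            if (pos < end) {                    /* uncovered gap [pos, end) */
                struct region *rg = malloc(sizeof(*rg));

                if (!rg)
                    return -1;
                rg->from = pos;
                rg->to = end;
                rg->next = *link;
                *link = rg;
                added += end - pos;
                link = &rg->next;
            }
            if (*link) {                        /* jump past the covered region */
                pos = (*link)->to;
                link = &(*link)->next;
            } else {
                pos = t;
            }
        }
        return added;
    }

    int main(void)
    {
        struct resv_map resv = { .head = NULL };

        printf("chg=%ld\n", region_missing(&resv, 0, 4));   /* 4 */
        printf("add=%ld\n", region_insert(&resv, 0, 4));    /* 4 */
        printf("chg=%ld\n", region_missing(&resv, 2, 8));   /* 4 */
        printf("add=%ld\n", region_insert(&resv, 2, 8));    /* 4 */
        return 0;
    }

Compiled on its own, the sketch prints chg/add pairs that match because nothing races with the map; in the kernel the two can differ, which is what the hugetlb_reserve_pages() matches at the end of this listing deal with.
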
1265 * properly, so add work-around here.
2687 * to add the page to the reservation map. If the page allocation fails,
2903 * not add an entry, set hugetlb_restore_reserve
3231 free_huge_folio(folio); /* add to the hugepage allocator */
4166 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4221 pr_err("HugeTLB: Unable to add hstate %s", h->name);
5950 * fault mutex is held when adding a hugetlb page
6715 long chg = -1, add = -1;
6811 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6813 if (unlikely(add < 0)) {
6816 } else if (unlikely(chg > add)) {
6832 (chg - add) * pages_per_huge_page(h), h_cg);
6835 chg - add);
6861 if (chg >= 0 && add < 0)
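
The final block of matches, source lines 6715 through 6861 in hugetlb_reserve_pages(), pairs the two steps: chg is the up-front estimate from region_chg(), add is what region_add() actually inserted, and when chg ends up larger than add the excess is handed back (the chg - add difference visible at lines 6832 and 6835). Below is a hedged user-space illustration of just that accounting pattern; charge(), uncharge() and the charged_pages counter are hypothetical stand-ins, not kernel interfaces.

    #include <stdio.h>

    /* Hypothetical stand-in for the reservation charge that
     * hugetlb_reserve_pages() takes and returns; not a kernel interface. */
    static long charged_pages;

    static void charge(long pages)   { charged_pages += pages; }
    static void uncharge(long pages) { charged_pages -= pages; }

    int main(void)
    {
        long chg = 4;   /* estimate from the region_chg step */
        long add = 2;   /* pages the region_add step really inserted,
                         * smaller here as if another task raced in */

        charge(chg);                 /* charge up front, from the estimate */
        if (add < 0)
            uncharge(chg);           /* the add failed: back everything out */
        else if (chg > add)
            uncharge(chg - add);     /* hand back the unused excess */

        printf("pages still charged: %ld\n", charged_pages);   /* prints 2 */
        return 0;
    }

Line 6861 of the listing shows the matching error-path check, taken when region_chg() succeeded (chg >= 0) but region_add() failed (add < 0).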