Lines Matching defs:count

102 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
127 spool->count = 1;
144 BUG_ON(!spool->count);
145 spool->count--;
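
The hits at 102-145 show a reference-count pattern on the subpool: it is created holding one reference (count = 1), each put decrements the count after a sanity check, and the structure is only released once both the reference count and the in-use page count reach zero. A minimal userspace sketch of that pattern follows; the struct and function names (demo_subpool, demo_create, demo_put) are hypothetical and this is not the kernel's hugepage_subpool code.

/* Sketch only: a refcount that also waits for charged pages to drain
 * before freeing, mirroring the count / used_hpages pairing above. */
#include <assert.h>
#include <stdlib.h>

struct demo_subpool {
	long count;        /* references to the subpool itself */
	long used_hpages;  /* pages currently charged to the subpool */
};

static struct demo_subpool *demo_create(void)
{
	struct demo_subpool *spool = calloc(1, sizeof(*spool));
	if (spool)
		spool->count = 1;  /* creator holds the first reference */
	return spool;
}

static void demo_put(struct demo_subpool *spool)
{
	assert(spool->count);   /* mirrors BUG_ON(!spool->count) */
	spool->count--;
	/* free only when nothing references it and no pages are charged */
	if (spool->count == 0 && spool->used_hpages == 0)
		free(spool);
}
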
363 * Calling this with regions_needed != NULL will count the number of pages
739 * usage count, and the global reserve count if needed. By incrementing
761 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
775 /* Locate each segment we overlap with, and count that overlap. */
1011 * so, we should decrement reserved count. Without decrementing,
1012 * reserve count remains after releasing inode, because this
2113 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2297 * the global reservation count will be incremented if PagePrivate is set.
2299 * reserve map here to be consistent with global reserve count adjustments
2313 * global reserve count will not be incremented
2436 * Adjust for the subpool count incremented above AND
2438 * the reservation count added in hugetlb_reserve_pages
2605 static void try_to_free_low(struct hstate *h, unsigned long count,
2617 if (count >= h->nr_huge_pages)
2629 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2668 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2688 * Changing node specific huge page count may require a corresponding
2689 * change to the global count. In any case, the passed node mask
2693 unsigned long old_count = count;
2695 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2697 * User may have specified a large count value which caused the
2699 * to allocate as many huge pages as possible. Set count to
2702 if (count < old_count)
2703 count = ULONG_MAX;
2714 if (count > persistent_huge_pages(h)) {
2733 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2738 while (count > persistent_huge_pages(h)) {
2776 min_count = max(count, min_count);
2783 while (count < persistent_huge_pages(h)) {
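
The set_max_huge_pages() hits at 2693-2703 show how a node-specific request becomes a global target: the huge pages resident on other nodes are added to the user's per-node count, and if that unsigned addition wraps around, the count is clamped to ULONG_MAX so the allocator simply tries for as many pages as possible. A standalone sketch of that saturation idiom, with a hypothetical function name (demo_adjust_count) and plain parameters in place of the hstate fields:

/* If adding the node adjustment wraps the unsigned counter,
 * clamp to ULONG_MAX, as in the lines quoted above. */
#include <limits.h>

static unsigned long demo_adjust_count(unsigned long count,
				       unsigned long global_pages,
				       unsigned long node_pages)
{
	unsigned long old_count = count;

	/* convert the node-specific request into a global target */
	count += global_pages - node_pages;

	/* wrap-around means the user asked for "as many as possible" */
	if (count < old_count)
		count = ULONG_MAX;

	return count;
}
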
2840 unsigned long count, size_t len)
2859 * Node specific request. count adjustment happens in
2866 err = set_max_huge_pages(h, count, nid, n_mask);
2876 unsigned long count;
2880 err = kstrtoul(buf, 10, &count);
2885 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2930 struct kobj_attribute *attr, const char *buf, size_t count)
2947 return count;
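
The hits around 2840-2947 follow the usual sysfs store convention: parse the decimal value from the user buffer with kstrtoul(), apply it, and return the number of bytes consumed on success or a negative errno on failure. A simplified kernel-style sketch of that shape is below; demo_count and demo_store are hypothetical names, and the real handler dispatches to __nr_hugepages_store_common() rather than storing a bare variable.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long demo_count;

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);  /* same parse step as line 2880 */
	if (err)
		return err;

	demo_count = val;
	return count;  /* report the whole buffer as consumed */
}
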
3550 unsigned long count = h->nr_huge_pages;
3552 total += (PAGE_SIZE << huge_page_order(h)) * count;
3561 count,
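
The hits at 3550-3561 total the memory held in huge pages: each hstate contributes its page count times its page size, where the page size is PAGE_SIZE shifted left by the hstate's order. A small arithmetic sketch of that accounting, using illustrative values rather than a real hstate:

/* A huge page of order N spans (PAGE_SIZE << N) bytes,
 * so the total is count * that size. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long order = 9;    /* e.g. 2MB pages on x86-64: 4KB << 9 */
	unsigned long count = 128;  /* hypothetical nr_huge_pages */
	unsigned long total = (DEMO_PAGE_SIZE << order) * count;

	printf("%lu huge pages of order %lu = %lu bytes\n", count, order, total);
	return 0;
}
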
3705 * Decrement reserve counts. The global reserve count may be
4171 * perform a COW due to a shared page count, attempt to satisfy
4480 /* Just decrements count, does not deallocate */
4645 /* Just decrements count, does not deallocate */
5466 * decrementing the ref count. If count == 1, the pte page is not shared.
5718 * here as well otherwise the global surplus count will not match