Lines matching defs:spool in mm/hugetlb.c (hugetlb subpool accounting)
100 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
102 bool free = (spool->count == 0) && (spool->used_hpages == 0);
104 spin_unlock(&spool->lock);
110 if (spool->min_hpages != -1)
111 hugetlb_acct_memory(spool->hstate,
112 -spool->min_hpages);
113 kfree(spool);
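
Lines 100-113 are the teardown helper. A minimal sketch of how they likely fit together, assuming the usual pattern of freeing only when both the reference count and the used-page count have dropped to zero (the if (free) block is inferred, not shown in the listing):

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        /* Free only once the last reference and the last used page are gone. */
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        if (free) {
                /* Return any minimum-size reservation to the global pool. */
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}
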
120 struct hugepage_subpool *spool;
122 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
123 if (!spool)
126 spin_lock_init(&spool->lock);
127 spool->count = 1;
128 spool->max_hpages = max_hpages;
129 spool->hstate = h;
130 spool->min_hpages = min_hpages;
133 kfree(spool);
136 spool->rsv_hpages = min_hpages;
138 return spool;
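
Lines 120-138 are the subpool constructor. A sketch of the probable flow; the early return NULL and the failure check that leads to the kfree() at line 133 are assumptions, since the listing only shows the allocation, the field initialisation, the error-path kfree() and the final return:

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        /* Charge the minimum size against the global pool up front. */
        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}
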
141 void hugepage_put_subpool(struct hugepage_subpool *spool)
143 spin_lock(&spool->lock);
144 BUG_ON(!spool->count);
145 spool->count--;
146 unlock_or_release_subpool(spool);
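
The put side at lines 141-146 appears almost completely in the listing; only the braces are added here. Dropping a reference funnels into unlock_or_release_subpool(), which may free the structure:

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;                         /* drop one reference */
        unlock_or_release_subpool(spool);       /* unlocks, may free spool */
}
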
157 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
162 if (!spool)
165 spin_lock(&spool->lock);
167 if (spool->max_hpages != -1) { /* maximum size accounting */
168 if ((spool->used_hpages + delta) <= spool->max_hpages)
169 spool->used_hpages += delta;
177 if (spool->min_hpages != -1 && spool->rsv_hpages) {
178 if (delta > spool->rsv_hpages) {
183 ret = delta - spool->rsv_hpages;
184 spool->rsv_hpages = 0;
187 spool->rsv_hpages -= delta;
192 spin_unlock(&spool->lock);
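
Lines 157-192 implement the charge side of subpool accounting. From the listed lines the contract appears to be: fail if the request would exceed max_hpages, otherwise return how many pages the caller still has to charge against the global pool; that number is smaller than delta whenever part of the request is already covered by the subpool's minimum-size reservation (rsv_hpages). A sketch with the unlisted pieces (the ret variable, the -ENOMEM branch, the ret = 0 branch and the final return) filled in as assumptions:

static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /* Reserves cover only part of the request. */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        /* Entirely covered by the subpool reservation. */
                        ret = 0;
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}
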
202 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
207 if (!spool)
210 spin_lock(&spool->lock);
212 if (spool->max_hpages != -1) /* maximum size accounting */
213 spool->used_hpages -= delta;
216 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
217 if (spool->rsv_hpages + delta <= spool->min_hpages)
220 ret = spool->rsv_hpages + delta - spool->min_hpages;
222 spool->rsv_hpages += delta;
223 if (spool->rsv_hpages > spool->min_hpages)
224 spool->rsv_hpages = spool->min_hpages;
228 * If hugetlbfs_put_super couldn't free spool due to an outstanding
231 unlock_or_release_subpool(spool);
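
Lines 202-231 are the inverse operation: uncharge delta pages and report how many global reservations the caller should drop. When the subpool has a minimum size and usage has fallen below it, some or all of the returned pages are kept as rsv_hpages instead of being released globally, so the return value can be less than delta. A sketch, with the ret variable, the ret = 0 branch, the second half of the comment at line 228 and the final return filled in as assumptions:

static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;        /* everything stays reserved locally */
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}
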
238 return HUGETLBFS_SB(inode->i_sb)->spool;
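
Line 238 is the body of the lookup helper: the subpool hangs off the hugetlbfs superblock. A sketch of it together with the companion subpool_vma() used at lines 2338 and 3691; the subpool_vma() body is an assumption based on those call sites:

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}
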
746 struct hugepage_subpool *spool = subpool_inode(inode);
750 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
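
Lines 746 and 750 look like a reserve-count fix-up path (plausibly hugetlb_fix_reserve_counts(); the name and everything outside the two listed lines are inferred). The pattern: charge one page against the subpool, and if the subpool's own reservation did not cover it, charge the global pool as well:

/* Hypothetical reconstruction around lines 746-750. */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;

        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
        if (rsv_adjust) {
                struct hstate *h = hstate_inode(inode);

                hugetlb_acct_memory(h, 1);
        }
}
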
1455 struct hugepage_subpool *spool =
1482 if (hugepage_subpool_put_pages(spool, 1) == 0)
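
Lines 1455 and 1482 are the free path. The truncated initialiser at line 1455 presumably reads the subpool pointer back out of page_private(), matching the set_page_private() call at line 2428, and a return value of 0 from hugepage_subpool_put_pages() means the freed page only refilled the subpool's minimum, so it should stay accounted as a reservation. A hypothetical condensation of just that pattern (the helper name and everything outside the two listed calls are illustration, not source):

/* Hypothetical condensation of the free-path calls at lines 1455-1482. */
static bool subpool_uncharge_on_free(struct page *page)
{
        struct hugepage_subpool *spool =
                (struct hugepage_subpool *)page_private(page);

        /* 0 means the page went back to satisfying the subpool minimum,
         * so the caller should restore it as a reserved page. */
        return hugepage_subpool_put_pages(spool, 1) == 0;
}
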
2338 struct hugepage_subpool *spool = subpool_vma(vma);
2365 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2428 set_page_private(page, (unsigned long)spool);
2443 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2459 hugepage_subpool_put_pages(spool, 1);
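
Lines 2338-2459 are the allocation side. The visible pattern: look up the subpool for the VMA, charge one page against it (a negative return aborts the allocation, a zero return means the subpool's own reservation already covers the page), stash the subpool pointer in page_private() so the free path can uncharge it, and give the charge back if things go wrong. A hypothetical condensation (helper name and error handling are illustration only):

/* Hypothetical condensation of the allocation-path calls at lines 2338-2459. */
static long subpool_charge_on_alloc(struct vm_area_struct *vma, struct page *page)
{
        struct hugepage_subpool *spool = subpool_vma(vma);
        long gbl_chg;

        /* 0 means no extra charge against the global pool is needed. */
        gbl_chg = hugepage_subpool_get_pages(spool, 1);
        if (gbl_chg < 0)
                return gbl_chg;

        /* Remember the subpool so the free path can uncharge it. */
        set_page_private(page, (unsigned long)spool);
        return gbl_chg;
}

The put at line 2443 computes rsv_adjust, which suggests an adjustment path (for example a lost race with a concurrent reservation), while the one at line 2459 reads like the error unwind for the charge above.
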
3691 struct hugepage_subpool *spool = subpool_vma(vma);
3708 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
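
Lines 3691 and 3708 are the VMA teardown side: outstanding reservations held by the mapping go back to the subpool, and only the portion the subpool does not retain for its minimum size (the return value) is released from the global pool. A hypothetical condensation (the helper name and the hugetlb_acct_memory() call are illustration; only the two listed lines are from the source):

/* Hypothetical condensation of the VMA-close calls at lines 3691-3708. */
static void subpool_return_vma_reserve(struct vm_area_struct *vma,
                                       struct hstate *h, long reserve)
{
        struct hugepage_subpool *spool = subpool_vma(vma);
        long gbl_reserve;

        /* The subpool keeps what its minimum needs; only the rest is released. */
        gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
        hugetlb_acct_memory(h, -gbl_reserve);
}
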
5137 struct hugepage_subpool *spool = subpool_inode(inode);
5209 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5260 rsv_adjust = hugepage_subpool_put_pages(spool,
5276 (void)hugepage_subpool_put_pages(spool, chg);
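
Lines 5137-5276 belong to the reservation setup path. The charge chg is first taken from the subpool, only the remainder (gbl_reserve) is charged to the global pool, and the two puts at lines 5260 and 5276 hand charges back: the first adjusts for a partial reservation (its second argument is truncated in the listing), the second undoes the whole charge on failure. A hypothetical condensation of the charge/undo sequence (helper name, error codes and the region-map bookkeeping in between are illustration only):

/* Hypothetical condensation of the reservation calls at lines 5137-5276. */
static int subpool_charge_reservation(struct inode *inode, struct hstate *h,
                                      long chg)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long gbl_reserve;

        /* Charge the subpool; only the remainder hits the global pool. */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
        if (gbl_reserve < 0)
                return -ENOSPC;

        if (hugetlb_acct_memory(h, gbl_reserve) < 0) {
                /* Global accounting failed: give the subpool charge back. */
                (void)hugepage_subpool_put_pages(spool, chg);
                return -ENOMEM;
        }
        return 0;
}
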
5298 struct hugepage_subpool *spool = subpool_inode(inode);
5324 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
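
Lines 5298 and 5324 are the matching unreserve path: whatever remains of the charge after the freed pages are deducted is returned to the subpool, and only the amount the subpool does not keep for its minimum is released from the global pool. A hypothetical condensation (helper name and the hugetlb_acct_memory() call are illustration; only the two listed lines are from the source):

/* Hypothetical condensation of the unreserve calls at lines 5298-5324. */
static void subpool_uncharge_reservation(struct inode *inode, struct hstate *h,
                                         long chg, long freed)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long gbl_reserve;

        /* Reservations consumed by already-freed pages are not returned again. */
        gbl_reserve = hugepage_subpool_put_pages(spool, chg - freed);
        hugetlb_acct_memory(h, -gbl_reserve);
}
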