Lines matching refs:pa_free

67  * pa_free   ->  free space available in this prealloc space (in clusters)
76 * pa_free.
88 * enough free space (pa_free) within the prealloc space.
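
The hits at 67-88 come from the comment block documenting the preallocation descriptor's fields. For the annotated sketches interleaved below, a simplified stand-in for that structure is enough; struct pa_sketch and its field set are illustrative assumptions, not the real struct ext4_prealloc_space:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Simplified, illustrative preallocation descriptor.  The real
 * struct ext4_prealloc_space carries more state (start blocks, type,
 * list/tree linkage for inode and locality-group preallocations, a
 * deletion marker, ...); only what the sketches below need is shown.
 */
struct pa_sketch {
	struct rb_node	pa_node;	/* linkage into a lookup tree (simplified) */
	spinlock_t	pa_lock;	/* protects the counters below */
	atomic_t	pa_count;	/* references held on this preallocation */
	unsigned int	pa_len;		/* size of the preallocated range, in clusters */
	unsigned int	pa_free;	/* clusters of that range not yet handed out */
};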
4635 * pa_free in ext4_mb_release_context(), but on failure, we've already
4636 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4665 pa->pa_free += ac->ac_b_ex.fe_len;
4694 BUG_ON(pa->pa_free < len);
4696 pa->pa_free -= len;
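
Lines 4635-4696 show the two directions of the accounting: the path that carves an allocation out of a preallocation first checks and then decrements pa_free (4694, 4696), while the undo path at 4665 adds the clusters back after a failed allocation, per the comment at 4635-4636. A minimal sketch of that pattern over the pa_sketch type above; pa_take()/pa_put_back() are hypothetical names, not the ext4 functions:

static void pa_take(struct pa_sketch *pa, unsigned int len)
{
	spin_lock(&pa->pa_lock);
	BUG_ON(pa->pa_free < len);	/* same sanity check as line 4694 */
	pa->pa_free -= len;		/* line 4696: clusters handed out */
	spin_unlock(&pa->pa_lock);
}

static void pa_put_back(struct pa_sketch *pa, unsigned int len)
{
	spin_lock(&pa->pa_lock);
	pa->pa_free += len;		/* line 4665: clusters returned on failure */
	spin_unlock(&pa->pa_lock);
}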
4909 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4922 * pa_free > 0 since otherwise we won't actually need
4926 * pa_free become zero when another thread calls:
4931 * 3. Further, after the above calls make pa_free == 0, we will
4937 * 4. Since the pa_free becoming 0 and pa_free getting removed
4941 * pa_free == 0.
4943 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4973 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
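
The numbered comment at 4922-4941 is an invariant argument: a preallocation that can still be found through the lookup structure should never be seen with pa_free == 0, because draining the last clusters and unlinking the pa happen together under the owning lock; the WARN_ON_ONCE at 4943 only flags violations of that expectation. A rough sketch of an update that keeps such an invariant (illustrative names and a bare rbtree, not the real ext4 code paths):

/*
 * Drain 'len' clusters and, when nothing is left, unlink the pa in the
 * same critical section, so a reader can never find a linked pa whose
 * pa_free has already dropped to zero.
 */
static void pa_drain_and_unlink(struct pa_sketch *pa, unsigned int len,
				struct rb_root *root, spinlock_t *lookup_lock)
{
	spin_lock(lookup_lock);
	pa->pa_free -= len;
	if (pa->pa_free == 0)
		rb_erase(&pa->pa_node, root);
	spin_unlock(lookup_lock);
}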
5082 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5232 pa->pa_free = pa->pa_len;
5242 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5283 pa->pa_free = pa->pa_len;
5295 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5310 * after updating the pa_free in ext4_mb_release_context
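
Lines 5082-5310 cover the endpoints of a preallocation's life: at creation the whole range is free (pa_free = pa_len at 5232 and 5283) and is counted into the s_mb_preallocated statistic (5242, 5295), while the teardown test at 5082 only lets a pa be destroyed once the last reference is dropped and pa_free has reached zero. A compact sketch of those two rules, again with hypothetical helpers:

/* Creation: the entire preallocated range starts out free (lines 5232, 5283). */
static void pa_init(struct pa_sketch *pa, unsigned int len)
{
	spin_lock_init(&pa->pa_lock);
	atomic_set(&pa->pa_count, 1);
	pa->pa_len = len;
	pa->pa_free = pa->pa_len;
}

/*
 * Teardown rule mirroring line 5082: drop a reference and report whether
 * the caller may destroy the pa -- only when no references remain and no
 * preallocated clusters are left unclaimed.
 */
static bool pa_put(struct pa_sketch *pa)
{
	return atomic_dec_and_test(&pa->pa_count) && pa->pa_free == 0;
}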
5366 if (free != pa->pa_free) {
5372 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5373 free, pa->pa_free);
5472 /* we can trust pa_free ... */
5473 free += pa->pa_free;
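
Lines 5366-5373 are a consistency check made when a preallocation is released: the free bits actually present in the group's block bitmap over the pa's range are counted and compared with the cached pa_free, and a mismatch is reported as an on-disk inconsistency; lines 5472-5473 show the counter being trusted directly when preallocated-but-unused space is summed into a free-space total. A sketch of the cross-check, written as if it sat inside fs/ext4/mballoc.c (mb_find_next_zero_bit()/mb_find_next_bit() are mballoc's own bitmap helpers); the function name and parameters are illustrative:

static void pa_verify_free(struct super_block *sb, ext4_group_t group,
			   void *bitmap, unsigned int start, unsigned int end,
			   struct pa_sketch *pa)
{
	unsigned int bit = start, next, free = 0;

	/* Sum the runs of zero (free) bits covering the pa's range. */
	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap, end, bit);
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap, end, bit);
		free += next - bit;
		bit = next + 1;
	}

	/* Same complaint as lines 5366-5373 when the cached counter disagrees. */
	if (free != pa->pa_free)
		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
				      free, pa->pa_free);
}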
5935 * possible from this lg. That means pa_free cannot be updated.
5948 order = fls(pa->pa_free) - 1;
5962 if (!added && pa->pa_free < tmp_pa->pa_free) {
5999 pa->pa_free -= ac->ac_b_ex.fe_len;
6009 if (likely(pa->pa_free)) {
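
The last group of hits (5935-6009) concerns locality-group preallocations: each is bucketed by the power-of-two size class of its remaining pa_free (fls(pa->pa_free) - 1 at 5948) and kept ordered by pa_free within the bucket (5962); after an allocation is carved out, pa_free shrinks (5999) and the pa is re-bucketed only if something is left (6009). A compressed sketch of that flow with hypothetical names:

/* Size class of a locality-group pa, as at line 5948. */
static inline int lg_pa_order(unsigned int pa_free)
{
	return fls(pa_free) - 1;	/* e.g. pa_free in 8..15 -> order 3 */
}

/*
 * After carving 'len' clusters out of a locality-group pa: shrink pa_free
 * (line 5999), then either re-bucket the pa by its new size class or, once
 * nothing is left, stop re-adding it (line 6009).
 */
static void lg_pa_consume(struct pa_sketch *pa, unsigned int len)
{
	spin_lock(&pa->pa_lock);
	pa->pa_free -= len;
	spin_unlock(&pa->pa_lock);

	if (pa->pa_free) {
		int order = lg_pa_order(pa->pa_free);

		/* re-insertion into the per-order list, kept sorted by pa_free, omitted */
		(void)order;
	}
	/* else: exhausted; the pa is a candidate for release */
}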