Lines matching refs: pa (fs/ext4/mballoc.c)

302 * - per-pa lock (pa)
305 * - new pa
309 * - find and use pa:
310 * pa
312 * - release consumed pa:
313 * pa
319 * pa
323 * pa
328 * pa
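The fragments above (lines 302-328) are the pa-bearing lines of the locking-rules table at the top of fs/ext4/mballoc.c: mballoc works with three lock classes, the group bitlock, the object lock (inode or locality group), and the per-pa spinlock, and each path in the table takes them in a fixed order; releasing a consumed pa, for instance, goes pa, then group, then object. A minimal user-space sketch of that ordering follows; the structs and the helper are hypothetical stand-ins, and the locks are taken one after another, as the real paths do, rather than nested.

#include <pthread.h>

struct pa_model     { pthread_mutex_t pa_lock;  };  /* per-pa lock    (pa)     */
struct group_model  { pthread_mutex_t bitlock;  };  /* group bitlock  (group)  */
struct object_model { pthread_mutex_t obj_lock; };  /* inode/locality (object) */

/* "release consumed pa" path from the table: pa, then group, then object */
static void release_consumed_pa(struct pa_model *pa,
				struct group_model *grp,
				struct object_model *obj)
{
	pthread_mutex_lock(&pa->pa_lock);      /* mark the pa deleted         */
	pthread_mutex_unlock(&pa->pa_lock);

	pthread_mutex_lock(&grp->bitlock);     /* unlink from the group list  */
	pthread_mutex_unlock(&grp->bitlock);

	pthread_mutex_lock(&obj->obj_lock);    /* unlink from the object list */
	pthread_mutex_unlock(&obj->obj_lock);  /* then RCU-free the pa        */
}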
692 struct ext4_prealloc_space *pa;
693 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
694 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
696 for (i = 0; i < pa->pa_len; i++)
3043 struct ext4_prealloc_space *pa;
3048 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3049 list_del(&pa->pa_group_list);
3051 kmem_cache_free(ext4_pspace_cachep, pa);
3545 struct ext4_prealloc_space *pa;
3652 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3655 if (pa->pa_deleted)
3657 spin_lock(&pa->pa_lock);
3658 if (pa->pa_deleted) {
3659 spin_unlock(&pa->pa_lock);
3663 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
3667 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3670 if (pa->pa_lstart >= end || pa_end <= start) {
3671 spin_unlock(&pa->pa_lock);
3674 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3676 /* adjust start or end to be adjacent to this pa */
3680 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3681 BUG_ON(pa->pa_lstart > end);
3682 end = pa->pa_lstart;
3684 spin_unlock(&pa->pa_lock);
3691 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3694 spin_lock(&pa->pa_lock);
3695 if (pa->pa_deleted == 0) {
3696 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
3697 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3699 spin_unlock(&pa->pa_lock);
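Lines 3545-3699 are ext4_mb_normalize_request(): before allocating, the goal window [start, end) is trimmed so that it overlaps no live inode pa, relying on the invariant (the BUG_ONs above) that the requested logical block itself never lies inside an existing pa. A minimal sketch of that trimming, assuming a plain array in place of the RCU-protected list; struct pa_range and trim_window are hypothetical names.

#include <assert.h>
#include <stddef.h>

/* hypothetical stand-in for one live inode preallocation */
struct pa_range { unsigned long lstart, lend; int deleted; };

/*
 * Shrink [*start, *end) until it overlaps no live pa while keeping
 * the goal block 'logical' inside the window, as in the adjust-
 * start-or-end branch of ext4_mb_normalize_request().
 */
static void trim_window(unsigned long *start, unsigned long *end,
			unsigned long logical,
			const struct pa_range *pas, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		const struct pa_range *pa = &pas[i];

		if (pa->deleted)
			continue;
		/* invariant: the goal block sits inside no pa */
		assert(logical < pa->lstart || logical >= pa->lend);
		if (pa->lstart >= *end || pa->lend <= *start)
			continue;              /* no overlap at all          */
		if (pa->lend <= logical)
			*start = pa->lend;     /* pa below goal: raise start */
		else
			*end = pa->lstart;     /* pa above goal: lower end   */
	}
}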
3775 struct ext4_prealloc_space *pa = ac->ac_pa;
3779 if (pa == NULL) {
3799 if (pa->pa_type == MB_INODE_PA)
3800 pa->pa_free += ac->ac_b_ex.fe_len;
3807 struct ext4_prealloc_space *pa)
3815 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3816 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3823 ac->ac_pa = pa;
3825 BUG_ON(start < pa->pa_pstart);
3826 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3827 BUG_ON(pa->pa_free < len);
3829 pa->pa_free -= len;
3831 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
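Lines 3807-3831 are ext4_mb_use_inode_pa(): the extent handed back is carved out of the pa at the matching file offset, start = pa_pstart + (logical - pa_lstart), the end is clamped both by the pa's own end and by the requested length, and pa_free shrinks by what was taken. A worked sketch with units and types simplified (one block type, no clusters); the names are hypothetical.

struct inode_pa {
	unsigned long long pstart;   /* first physical block */
	unsigned long long lstart;   /* first logical block  */
	unsigned long long len;      /* total length         */
	unsigned long long free;     /* still unused         */
};

/* carve at most 'want' blocks for logical block 'logical' */
static unsigned long long use_inode_pa(struct inode_pa *pa,
				       unsigned long long logical,
				       unsigned long long want,
				       unsigned long long *phys)
{
	unsigned long long start = pa->pstart + (logical - pa->lstart);
	unsigned long long end = pa->pstart + pa->len;

	if (end > start + want)
		end = start + want;      /* never more than was asked for */
	*phys = start;
	pa->free -= end - start;         /* consume from the pa           */
	return end - start;
}

So a pa covering logical blocks 100-115 at physical block 5000 serves a request for logical block 104 from physical 5004, and the three BUG_ONs above check exactly the bounds this arithmetic guarantees.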
3838 struct ext4_prealloc_space *pa)
3842 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3847 ac->ac_pa = pa;
3851 * instead we correct pa later, after blocks are marked
3853 * Other CPUs are prevented from allocating from this pa by lg_mutex
3855 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
3856 pa->pa_lstart-len, len, pa);
3867 struct ext4_prealloc_space *pa,
3873 atomic_inc(&pa->pa_count);
3874 return pa;
3877 new_distance = abs(goal_block - pa->pa_pstart);
3884 atomic_inc(&pa->pa_count);
3885 return pa;
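Lines 3838-3856 (ext4_mb_use_group_pa) take blocks from the front of a group pa and defer the pa_free correction until the blocks are actually marked, since lg_mutex keeps other CPUs out of the pa meanwhile. Lines 3867-3885 (ext4_mb_check_group_pa) pick between candidate group pas: the one whose physical start lies nearest the goal block wins, and the winner's pa_count is bumped so it cannot be freed underneath us. A sketch of the distance comparison, refcounting elided; closer_pa is a hypothetical name.

#include <stdlib.h>

struct group_pa { long long pstart; long long free; };

/* return whichever of 'cur' and 'best' starts closer to goal_block */
static struct group_pa *closer_pa(long long goal_block,
				  struct group_pa *cur,
				  struct group_pa *best)
{
	if (best == NULL)
		return cur;     /* first usable candidate always wins */
	if (llabs(goal_block - cur->pstart) <
	    llabs(goal_block - best->pstart))
		return cur;     /* strictly closer replaces the best  */
	return best;
}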
3898 struct ext4_prealloc_space *pa, *cpa = NULL;
3907 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3911 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3912 ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
3917 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3922 spin_lock(&pa->pa_lock);
3923 if (pa->pa_deleted == 0 && pa->pa_free) {
3924 atomic_inc(&pa->pa_count);
3925 ext4_mb_use_inode_pa(ac, pa);
3926 spin_unlock(&pa->pa_lock);
3931 spin_unlock(&pa->pa_lock);
3955 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3957 spin_lock(&pa->pa_lock);
3958 if (pa->pa_deleted == 0 &&
3959 pa->pa_free >= ac->ac_o_ex.fe_len) {
3962 pa, cpa);
3964 spin_unlock(&pa->pa_lock);
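Lines 3898-3964 are ext4_mb_use_preallocated(), a two-pass lookup: the first pass walks the inode's pa list under RCU and takes any live pa whose logical range covers the request (and, for non-extent files, whose physical range stays within the legacy 32-bit block limit, the check at 3917); only if that fails does the second pass scan the locality-group buckets for a pa with enough free clusters, keeping the closest candidate as above. A condensed sketch over plain arrays; both function names are hypothetical.

#include <stddef.h>

struct pa_cand { unsigned long lstart, lend; long long pstart; long free; int deleted; };

/* pass 1: an inode pa must logically cover the requested block */
static struct pa_cand *find_inode_pa(struct pa_cand *pas, size_t n,
				     unsigned long logical)
{
	for (size_t i = 0; i < n; i++)
		if (!pas[i].deleted && pas[i].free &&
		    logical >= pas[i].lstart && logical < pas[i].lend)
			return &pas[i];
	return NULL;
}

/* pass 2: a group pa only needs enough free space; prefer the
 * candidate starting closest to the goal, as in closer_pa() above */
static struct pa_cand *find_group_pa(struct pa_cand *pas, size_t n,
				     long want, long long goal)
{
	struct pa_cand *best = NULL;

	for (size_t i = 0; i < n; i++) {
		struct pa_cand *p = &pas[i];
		long long d, db;

		if (p->deleted || p->free < want)
			continue;
		d  = p->pstart > goal ? p->pstart - goal : goal - p->pstart;
		db = best ? (best->pstart > goal ? best->pstart - goal
						 : goal - best->pstart) : 0;
		if (!best || d < db)
			best = p;
	}
	return best;
}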
4012 struct ext4_prealloc_space *pa;
4031 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4032 spin_lock(&pa->pa_lock);
4033 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4035 len = pa->pa_len;
4036 spin_unlock(&pa->pa_lock);
4047 struct ext4_prealloc_space *pa)
4051 if (pa->pa_deleted) {
4052 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4053 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4054 pa->pa_len);
4058 pa->pa_deleted = 1;
4060 if (pa->pa_type == MB_INODE_PA) {
4061 ei = EXT4_I(pa->pa_inode);
4068 struct ext4_prealloc_space *pa;
4069 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4071 BUG_ON(atomic_read(&pa->pa_count));
4072 BUG_ON(pa->pa_deleted == 0);
4073 kmem_cache_free(ext4_pspace_cachep, pa);
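Lines 4047-4073 show the pa's two-step death: ext4_mb_mark_pa_deleted() only flips pa_deleted (warning if it was already set), which makes the pa invisible to new lookups, and the memory itself is reclaimed later in ext4_mb_pa_callback(), which runs from call_rcu() once every lockless list walker that might still see the pa has finished. A user-space sketch of the two steps, assuming the caller provides the grace-period guarantee that call_rcu() gives the kernel.

#include <stdatomic.h>
#include <stdlib.h>

struct dying_pa { atomic_int count; int deleted; };

/* step 1: unpublish, so new lookups skip this pa from now on */
static void mark_pa_deleted(struct dying_pa *pa)
{
	pa->deleted = 1;        /* the kernel warns if already set */
}

/* step 2: reclaim; in the kernel this is the call_rcu() callback,
 * invoked only after all pre-existing readers are done */
static void pa_callback(struct dying_pa *pa)
{
	/* models the two BUG_ONs: unreferenced and unpublished */
	if (atomic_load(&pa->count) != 0 || !pa->deleted)
		abort();
	free(pa);               /* kmem_cache_free() in the kernel */
}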
4081 struct super_block *sb, struct ext4_prealloc_space *pa)
4087 spin_lock(&pa->pa_lock);
4088 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4089 spin_unlock(&pa->pa_lock);
4093 if (pa->pa_deleted == 1) {
4094 spin_unlock(&pa->pa_lock);
4098 ext4_mb_mark_pa_deleted(sb, pa);
4099 spin_unlock(&pa->pa_lock);
4101 grp_blk = pa->pa_pstart;
4104 * next group when pa is used up
4106 if (pa->pa_type == MB_GROUP_PA)
4126 list_del(&pa->pa_group_list);
4129 spin_lock(pa->pa_obj_lock);
4130 list_del_rcu(&pa->pa_inode_list);
4131 spin_unlock(pa->pa_obj_lock);
4133 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
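Lines 4081-4133 are ext4_mb_put_pa(): dropping a reference only destroys the pa when this was the last reference and the pa is fully consumed (pa_free == 0); otherwise, or if another CPU already set pa_deleted, it is left alone. The winner then unlinks it from the group and object lists and hands it to call_rcu(), in the pa, group, object order sketched at lines 312-313. A sketch of just that gate; should_destroy is a hypothetical name.

#include <stdatomic.h>
#include <stdbool.h>

struct held_pa { atomic_int count; long free; int deleted; };

/* mirrors the early returns in ext4_mb_put_pa() */
static bool should_destroy(struct held_pa *pa)
{
	/* atomic_fetch_sub returns the old value, so != 1 means
	 * someone else still holds a reference after our drop */
	if (atomic_fetch_sub(&pa->count, 1) != 1 || pa->free != 0)
		return false;   /* still referenced, or blocks remain */
	if (pa->deleted)
		return false;   /* lost the race to another CPU       */
	pa->deleted = 1;        /* we won: caller unlinks + RCU-frees */
	return true;
}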
4144 struct ext4_prealloc_space *pa;
4154 pa = ac->ac_pa;
4203 pa->pa_lstart = ac->ac_b_ex.fe_logical;
4204 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4205 pa->pa_len = ac->ac_b_ex.fe_len;
4206 pa->pa_free = pa->pa_len;
4207 spin_lock_init(&pa->pa_lock);
4208 INIT_LIST_HEAD(&pa->pa_inode_list);
4209 INIT_LIST_HEAD(&pa->pa_group_list);
4210 pa->pa_deleted = 0;
4211 pa->pa_type = MB_INODE_PA;
4213 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4214 pa->pa_len, pa->pa_lstart);
4215 trace_ext4_mb_new_inode_pa(ac, pa);
4217 ext4_mb_use_inode_pa(ac, pa);
4218 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4225 pa->pa_obj_lock = &ei->i_prealloc_lock;
4226 pa->pa_inode = ac->ac_inode;
4228 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4230 spin_lock(pa->pa_obj_lock);
4231 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4232 spin_unlock(pa->pa_obj_lock);
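Lines 4144-4232 are ext4_mb_new_inode_pa(): the surplus of a successful allocation becomes a new inode pa. The function records the logical and physical start and the length of the best-found extent, initializes the spinlock and both list heads, immediately carves out the part the caller actually asked for (ext4_mb_use_inode_pa at 4217), and only then publishes the pa on the group's bb_prealloc_list and, via RCU, on the inode's i_prealloc_list. A sketch of the field setup, list plumbing elided; the field names mirror the kernel's but the struct is simplified.

enum pa_type { MB_INODE_PA, MB_GROUP_PA };

struct fresh_pa {
	unsigned long lstart;        /* logical start of the area   */
	unsigned long long pstart;   /* physical start              */
	int len;                     /* total preallocated clusters */
	int free;                    /* not yet handed out          */
	int deleted;
	enum pa_type type;
};

static void init_inode_pa(struct fresh_pa *pa, unsigned long best_lstart,
			  unsigned long long best_pstart, int best_len)
{
	pa->lstart  = best_lstart;   /* where the found extent begins */
	pa->pstart  = best_pstart;
	pa->len     = best_len;
	pa->free    = pa->len;       /* nothing consumed yet */
	pa->deleted = 0;
	pa->type    = MB_INODE_PA;
	/* kernel: init pa_lock + list heads, use_inode_pa(), publish */
}

The group variant at lines 4244-4288 is the same recipe except that a group pa has no file position, so pa_lstart is simply set equal to pa_pstart and the type is MB_GROUP_PA.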
4244 struct ext4_prealloc_space *pa;
4253 pa = ac->ac_pa;
4259 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4260 pa->pa_lstart = pa->pa_pstart;
4261 pa->pa_len = ac->ac_b_ex.fe_len;
4262 pa->pa_free = pa->pa_len;
4263 spin_lock_init(&pa->pa_lock);
4264 INIT_LIST_HEAD(&pa->pa_inode_list);
4265 INIT_LIST_HEAD(&pa->pa_group_list);
4266 pa->pa_deleted = 0;
4267 pa->pa_type = MB_GROUP_PA;
4269 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4270 pa->pa_len, pa->pa_lstart);
4271 trace_ext4_mb_new_group_pa(ac, pa);
4273 ext4_mb_use_group_pa(ac, pa);
4274 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4282 pa->pa_obj_lock = &lg->lg_prealloc_lock;
4283 pa->pa_inode = NULL;
4285 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4288 * We will later add the new pa to the right bucket
4304 * @pa must be unlinked from inode and group lists, so that
4311 struct ext4_prealloc_space *pa)
4322 BUG_ON(pa->pa_deleted == 0);
4323 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4324 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4325 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4326 end = bit + pa->pa_len;
4339 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4342 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4345 if (free != pa->pa_free) {
4347 "pa %p: logic %lu, phys. %lu, len %d",
4348 pa, (unsigned long) pa->pa_lstart,
4349 (unsigned long) pa->pa_pstart,
4350 pa->pa_len);
4352 free, pa->pa_free);
4354 * pa is already deleted so we use the value obtained
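Lines 4304-4354 are ext4_mb_release_inode_pa(): it gives the unused tail of an already-deleted pa back to the buddy by walking the block bitmap between the pa's first and last bit, freeing every still-clear run, and then cross-checking the freed total against pa_free; a mismatch raises ext4_error, and since the pa is already unlinked, the bitmap's count is what gets trusted (the comment at 4354). A sketch of the scan over a plain byte bitmap; the two bit helpers are hypothetical stand-ins for mb_find_next_zero_bit() and mb_find_next_bit().

#include <stddef.h>

static size_t next_zero(const unsigned char *map, size_t end, size_t i)
{
	while (i < end && (map[i / 8] & (1u << (i % 8))))
		i++;
	return i;               /* first clear (free) bit at or after i */
}

static size_t next_set(const unsigned char *map, size_t end, size_t i)
{
	while (i < end && !(map[i / 8] & (1u << (i % 8))))
		i++;
	return i;               /* first set (used) bit at or after i */
}

/* free every clear run in [bit, end); returns how much was freed */
static size_t release_unused(const unsigned char *bitmap,
			     size_t bit, size_t end)
{
	size_t freed = 0;

	while (bit < end) {
		bit = next_zero(bitmap, end, bit);
		if (bit >= end)
			break;
		size_t run_end = next_set(bitmap, end, bit);
		freed += run_end - bit;   /* kernel: mb_free_blocks() */
		bit = run_end;
	}
	return freed;   /* caller compares this against pa->pa_free */
}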
4365 struct ext4_prealloc_space *pa)
4371 trace_ext4_mb_release_group_pa(sb, pa);
4372 BUG_ON(pa->pa_deleted == 0);
4373 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4374 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
4376 e4b->bd_group, group, pa->pa_pstart);
4379 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4380 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4381 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4401 struct ext4_prealloc_space *pa, *tmp;
4432 list_for_each_entry_safe(pa, tmp,
4434 spin_lock(&pa->pa_lock);
4435 if (atomic_read(&pa->pa_count)) {
4436 spin_unlock(&pa->pa_lock);
4440 if (pa->pa_deleted) {
4441 spin_unlock(&pa->pa_lock);
4446 ext4_mb_mark_pa_deleted(sb, pa);
4452 free += pa->pa_free;
4454 spin_unlock(&pa->pa_lock);
4456 list_del(&pa->pa_group_list);
4457 list_add(&pa->u.pa_tmp_list, &list);
4461 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4464 spin_lock(pa->pa_obj_lock);
4465 list_del_rcu(&pa->pa_inode_list);
4466 spin_unlock(pa->pa_obj_lock);
4468 if (pa->pa_type == MB_GROUP_PA)
4469 ext4_mb_release_group_pa(&e4b, pa);
4471 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4473 list_del(&pa->u.pa_tmp_list);
4474 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
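Lines 4401-4474 are ext4_mb_discard_group_preallocations(), and it is deliberately two-phase: under the group lock it only selects victims, skipping pas that are referenced (pa_count) or already being deleted, marking the rest deleted and moving them onto a private list; the bitmap work and the call_rcu() happen afterwards, off the hot lock. A sketch of the selection phase over an array, with plain ints standing in for pa_count and pa_deleted.

#include <stddef.h>

struct victim_pa { int busy; int deleted; long free; };

/* phase 1: pick victims "under the group lock", defer the real work;
 * returns how many blocks phase 2 will reclaim */
static long collect_victims(struct victim_pa *pas, size_t n,
			    struct victim_pa **out, size_t *n_out)
{
	long freed = 0;

	*n_out = 0;
	for (size_t i = 0; i < n; i++) {
		if (pas[i].busy)
			continue;     /* pa_count != 0: in active use  */
		if (pas[i].deleted)
			continue;     /* someone else is discarding it */
		pas[i].deleted = 1;   /* claim it for this discard     */
		freed += pas[i].free;
		out[(*n_out)++] = &pas[i];
	}
	return freed;   /* phase 2: release each victim, then RCU-free */
}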
4500 struct ext4_prealloc_space *pa, *tmp;
4525 /* first, collect all pa's in the inode */
4528 pa = list_entry(ei->i_prealloc_list.prev,
4530 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4531 spin_lock(&pa->pa_lock);
4532 if (atomic_read(&pa->pa_count)) {
4535 spin_unlock(&pa->pa_lock);
4538 "uh-oh! used pa while discarding");
4544 if (pa->pa_deleted == 0) {
4545 ext4_mb_mark_pa_deleted(sb, pa);
4546 spin_unlock(&pa->pa_lock);
4547 list_del_rcu(&pa->pa_inode_list);
4548 list_add(&pa->u.pa_tmp_list, &list);
4553 /* someone is deleting pa right now */
4554 spin_unlock(&pa->pa_lock);
4558 * doesn't mean pa is already unlinked from
4562 * pa from inode's list may access already
4574 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4575 BUG_ON(pa->pa_type != MB_INODE_PA);
4576 group = ext4_get_group_number(sb, pa->pa_pstart);
4596 list_del(&pa->pa_group_list);
4597 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4603 list_del(&pa->u.pa_tmp_list);
4604 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
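Lines 4500-4604 are ext4_discard_preallocations() for an inode. Unlike the group discard, it cannot simply skip a busy pa: the inode is going away (truncate or close), so finding pa_count != 0 is a bug-worthy surprise ("uh-oh! used pa while discarding"), and the kernel warns, sleeps, and rescans until every pa can be collected, then releases them group by group. A sketch of that retry gate; the sleep is a user-space stand-in for schedule_timeout_uninterruptible().

#include <stdatomic.h>
#include <unistd.h>

struct inode_pa_ref { atomic_int count; };

/* rescan until no pa on the inode is still referenced */
static void wait_until_unused(struct inode_pa_ref *pas, int n)
{
	for (;;) {
		int busy = 0;

		for (int i = 0; i < n; i++)
			if (atomic_load(&pas[i].count))
				busy = 1;   /* should not happen here */
		if (!busy)
			return;
		usleep(1000);   /* kernel: warn + schedule_timeout, retry */
	}
}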
4610 struct ext4_prealloc_space *pa;
4613 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
4614 if (!pa)
4616 atomic_set(&pa->pa_count, 1);
4617 ac->ac_pa = pa;
4623 struct ext4_prealloc_space *pa = ac->ac_pa;
4625 BUG_ON(!pa);
4627 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
4628 kmem_cache_free(ext4_pspace_cachep, pa);
4643 struct ext4_prealloc_space *pa;
4651 pa = list_entry(cur, struct ext4_prealloc_space,
4653 spin_lock(&pa->pa_lock);
4654 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4656 spin_unlock(&pa->pa_lock);
4658 pa->pa_len);
4825 struct ext4_prealloc_space *pa, *tmp;
4832 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4835 spin_lock(&pa->pa_lock);
4836 if (atomic_read(&pa->pa_count)) {
4838 * This is the pa that we just used
4842 spin_unlock(&pa->pa_lock);
4845 if (pa->pa_deleted) {
4846 spin_unlock(&pa->pa_lock);
4850 BUG_ON(pa->pa_type != MB_GROUP_PA);
4853 ext4_mb_mark_pa_deleted(sb, pa);
4854 spin_unlock(&pa->pa_lock);
4856 list_del_rcu(&pa->pa_inode_list);
4857 list_add(&pa->u.pa_tmp_list, &discard_list);
4872 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4875 group = ext4_get_group_number(sb, pa->pa_pstart);
4884 list_del(&pa->pa_group_list);
4885 ext4_mb_release_group_pa(&e4b, pa);
4889 list_del(&pa->u.pa_tmp_list);
4890 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4908 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4910 order = fls(pa->pa_free) - 1;
4924 if (!added && pa->pa_free < tmp_pa->pa_free) {
4926 list_add_tail_rcu(&pa->pa_inode_list,
4938 list_add_tail_rcu(&pa->pa_inode_list,
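Lines 4908-4938 are ext4_mb_add_n_trim(): a still-useful group pa is filed into one of the locality group's buckets, indexed by order = fls(pa_free) - 1 so that pas of similar size share a bucket, and within the bucket the insertion at 4924-4926 keeps the list sorted by pa_free (each pa goes in front of the first larger one). The bucket arithmetic, with a portable fls():

static int fls_portable(unsigned int x)
{
	int r = 0;

	while (x) {             /* position of the highest set bit */
		r++;
		x >>= 1;
	}
	return r;               /* fls(0)==0, fls(1)==1, fls(8)==4 */
}

static int lg_bucket(unsigned int pa_free)
{
	int order = fls_portable(pa_free) - 1;

	if (order > 9)          /* kernel caps at PREALLOC_TB_SIZE - 1 */
		order = 9;
	return order;
}

So a pa with 100 free clusters (fls = 7) lands in bucket 6, alongside pas holding 64-127 clusters.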
4976 struct ext4_prealloc_space *pa = ac->ac_pa;
4977 if (pa) {
4978 if (pa->pa_type == MB_GROUP_PA) {
4980 spin_lock(&pa->pa_lock);
4981 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4982 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4983 pa->pa_free -= ac->ac_b_ex.fe_len;
4984 pa->pa_len -= ac->ac_b_ex.fe_len;
4985 spin_unlock(&pa->pa_lock);
4988 * We want to add the pa to the right bucket.
4993 if (likely(pa->pa_free)) {
4994 spin_lock(pa->pa_obj_lock);
4995 list_del_rcu(&pa->pa_inode_list);
4996 spin_unlock(pa->pa_obj_lock);
5001 if (pa->pa_type == MB_INODE_PA) {
5006 spin_lock(pa->pa_obj_lock);
5007 list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5008 spin_unlock(pa->pa_obj_lock);
5011 ext4_mb_put_pa(ac, ac->ac_sb, pa);
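Lines 4976-5011 are ext4_mb_release_context() settling up with the pa that was used. For a group pa the whole window slides forward past the blocks just taken (pstart and lstart advance, len and free shrink), so the next allocation continues right behind this one, and if anything is left the pa is re-filed into the right lg bucket; an inode pa is merely moved to the head of i_prealloc_list as a recently-used hint. A sketch of the slide, units simplified to plain blocks:

struct lg_window {
	unsigned long long pstart, lstart;
	long len, free;
};

/* advance a group pa past the 'used' blocks just allocated from it */
static void slide_group_pa(struct lg_window *pa, long used)
{
	pa->pstart += used;
	pa->lstart += used;     /* lstart tracks pstart for a group pa */
	pa->free   -= used;
	pa->len    -= used;
	/* if pa->free > 0, the kernel re-buckets it (add_n_trim) */
}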
5170 * pa allocated above is added to grp->bb_prealloc_list only
5174 * So we have to free this pa here itself.
5199 * If block allocation fails then the pa allocated above