Lines Matching refs:tmp_pa

4254 struct ext4_prealloc_space *tmp_pa;
4262 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4264 tmp_pa_start = tmp_pa->pa_lstart;
4265 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4267 spin_lock(&tmp_pa->pa_lock);
4268 if (tmp_pa->pa_deleted == 0)
4270 spin_unlock(&tmp_pa->pa_lock);
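
The matches above (lines 4254-4270, apparently from fs/ext4/mballoc.c) show the recurring pattern for scanning inode preallocations: take tmp_pa->pa_lock, ignore the entry if pa_deleted is set, and only then look at its logical range. Below is a minimal userspace sketch of that pattern, with made-up struct and function names and a pthread mutex standing in for the spinlock:

/*
 * Simplified sketch (not the kernel code) of the scan at lines 4254-4270:
 * walk each preallocation, take its lock, skip it if it is already marked
 * deleted, and only then check its logical range for overlap.
 */
#include <assert.h>
#include <pthread.h>

struct demo_pa {
	unsigned long pa_lstart;	/* logical start of the PA          */
	unsigned long pa_len;		/* length in blocks                 */
	int pa_deleted;			/* set once the PA has been retired */
	pthread_mutex_t pa_lock;	/* stand-in for pa_lock             */
	struct demo_pa *next;
};

/* Assert that [start, start + len) overlaps no live preallocation. */
static void demo_pa_assert_no_overlap(struct demo_pa *head,
				      unsigned long start, unsigned long len)
{
	for (struct demo_pa *pa = head; pa; pa = pa->next) {
		unsigned long pa_end = pa->pa_lstart + pa->pa_len;

		pthread_mutex_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0)
			/* live PA: the new range must not intersect it */
			assert(start >= pa_end || start + len <= pa->pa_lstart);
		pthread_mutex_unlock(&pa->pa_lock);
	}
}
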
4291 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4310 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4312 tmp_pa_start = tmp_pa->pa_lstart;
4313 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4316 spin_lock(&tmp_pa->pa_lock);
4317 if (tmp_pa->pa_deleted == 0)
4320 spin_unlock(&tmp_pa->pa_lock);
4327 if (tmp_pa) {
4328 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4331 left_pa = tmp_pa;
4341 right_pa = tmp_pa;
4360 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4362 left_pa = tmp_pa;
4363 spin_lock(&tmp_pa->pa_lock);
4364 if (tmp_pa->pa_deleted == 0) {
4365 spin_unlock(&tmp_pa->pa_lock);
4368 spin_unlock(&tmp_pa->pa_lock);
4380 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4382 right_pa = tmp_pa;
4383 spin_lock(&tmp_pa->pa_lock);
4384 if (tmp_pa->pa_deleted == 0) {
4385 spin_unlock(&tmp_pa->pa_lock);
4388 spin_unlock(&tmp_pa->pa_lock);
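
Lines 4291-4388 locate the preallocations immediately to the left and right of the requested logical block and, when the nearest candidate is deleted, keep stepping outward (rb_prev()/rb_next() on the rb-tree) until a live one is found. Here is a simplified sketch of that neighbour search over a sorted array instead of an rb-tree; the names are illustrative, not the kernel's:

/*
 * Sketch of the neighbour search at lines 4291-4388.  pas[] is sorted by
 * pa_lstart; *left and *right receive the nearest live preallocations on
 * either side of the requested block.
 */
#include <stddef.h>

struct demo_pa {
	unsigned long pa_lstart;
	int pa_deleted;
};

static void demo_find_neighbours(struct demo_pa *pas, size_t n,
				 unsigned long logical,
				 struct demo_pa **left, struct demo_pa **right)
{
	*left = *right = NULL;

	/* nearest live PA strictly to the left of the request */
	for (size_t i = 0; i < n; i++) {
		if (pas[i].pa_lstart >= logical)
			break;
		if (!pas[i].pa_deleted)
			*left = &pas[i];
	}

	/* nearest live PA at or to the right of the request */
	for (size_t i = n; i-- > 0; ) {
		if (pas[i].pa_lstart < logical)
			break;
		if (!pas[i].pa_deleted)
			*right = &pas[i];
	}
}
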
4796 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4821 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4825 tmp_pa->pa_lstart, iter)) {
4826 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4832 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4835 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4837 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4840 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4852 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4857 * valid tmp_pa which is guaranteed to be non-deleted.
4859 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4867 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4869 spin_lock(&tmp_pa->pa_lock);
4870 if (tmp_pa->pa_deleted == 0) {
4880 spin_unlock(&tmp_pa->pa_lock);
4884 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4885 BUG_ON(tmp_pa->pa_deleted == 1);
4892 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4893 spin_unlock(&tmp_pa->pa_lock);
4899 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4905 spin_unlock(&tmp_pa->pa_lock);
4909 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4910 atomic_inc(&tmp_pa->pa_count);
4911 ext4_mb_use_inode_pa(ac, tmp_pa);
4912 spin_unlock(&tmp_pa->pa_lock);
4943 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4945 spin_unlock(&tmp_pa->pa_lock);
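
Lines 4796-4945 implement the goal lookup: find the rightmost preallocation whose pa_lstart is at or before the requested block, walk left past entries that were deleted concurrently, and use the PA only if the request still falls inside it and it has free blocks left (taking a reference via pa_count). The following is a rough sketch of the same control flow over a sorted array, with locking and the physical-block checks omitted and all names made up:

/*
 * Sketch of the inode PA lookup at lines 4796-4945 over a sorted array
 * rather than an rb-tree.  Returns the PA to use, or NULL.
 */
#include <stddef.h>

struct demo_pa {
	unsigned long pa_lstart;
	unsigned long pa_len;
	unsigned long pa_free;
	int pa_deleted;
	int pa_count;		/* stand-in for the atomic refcount */
};

static struct demo_pa *demo_use_preallocated(struct demo_pa *pas, size_t n,
					     unsigned long logical)
{
	size_t i = n;

	/* rightmost PA that starts at or before the requested block */
	while (i > 0 && pas[i - 1].pa_lstart > logical)
		i--;

	/* step left over PAs that were deleted concurrently */
	while (i > 0 && pas[i - 1].pa_deleted)
		i--;
	if (i == 0)
		return NULL;		/* nothing usable to the left */

	struct demo_pa *pa = &pas[i - 1];
	if (logical >= pa->pa_lstart + pa->pa_len)
		return NULL;		/* request lies past this PA's end */
	if (pa->pa_free == 0)
		return NULL;		/* PA is already fully used */

	pa->pa_count++;			/* "use" the preallocation */
	return pa;
}
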
4969 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4971 spin_lock(&tmp_pa->pa_lock);
4972 if (tmp_pa->pa_deleted == 0 &&
4973 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4976 tmp_pa, cpa);
4978 spin_unlock(&tmp_pa->pa_lock);
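
Lines 4969-4978 scan the locality-group lists under RCU for a live preallocation with enough free blocks and remember the best candidate (the kernel picks via ext4_mb_check_group_pa(), which prefers the PA closest to the goal block). A simplified sketch without RCU or per-PA locking, which just keeps the candidate with the most free blocks; types and names are illustrative:

/*
 * Sketch of the locality-group scan at lines 4969-4978: pick a live
 * preallocation with enough free blocks for the request.
 */
struct demo_lg_pa {
	unsigned long pa_free;
	int pa_deleted;
	struct demo_lg_pa *next;
};

static struct demo_lg_pa *demo_lg_search(struct demo_lg_pa *head,
					 unsigned long needed)
{
	struct demo_lg_pa *best = (void *)0;

	for (struct demo_lg_pa *pa = head; pa; pa = pa->next) {
		if (pa->pa_deleted || pa->pa_free < needed)
			continue;
		/* keep the largest fit; a simplified stand-in for the
		 * kernel's ext4_mb_check_group_pa() preference          */
		if (!best || pa->pa_free > best->pa_free)
			best = pa;
	}
	return best;
}
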
5946 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5954 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5957 spin_lock(&tmp_pa->pa_lock);
5958 if (tmp_pa->pa_deleted) {
5959 spin_unlock(&tmp_pa->pa_lock);
5962 if (!added && pa->pa_free < tmp_pa->pa_free) {
5965 &tmp_pa->pa_node.lg_list);
5972 spin_unlock(&tmp_pa->pa_lock);
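
Lines 5946-5972 add a preallocation back to its locality-group list, skipping deleted entries and (per the pa->pa_free < tmp_pa->pa_free test) linking the new PA in front of the first live entry with more free blocks, so the list stays ordered by pa_free. A sketch of that sorted insert on a plain singly linked list, again with illustrative names and no RCU or locking:

/*
 * Sketch of the sorted insert at lines 5946-5972: skip deleted entries and
 * entries with fewer free blocks, then link the new PA in front of the
 * first live entry with more free blocks (or at the tail).
 */
struct demo_lg_pa {
	unsigned long pa_free;
	int pa_deleted;
	struct demo_lg_pa *next;
};

static void demo_lg_add(struct demo_lg_pa **head, struct demo_lg_pa *pa)
{
	struct demo_lg_pa **link = head;

	/* advance past deleted entries and live entries with <= pa_free */
	while (*link && ((*link)->pa_deleted || (*link)->pa_free <= pa->pa_free))
		link = &(*link)->next;

	pa->next = *link;	/* insert before the first larger live entry */
	*link = pa;
}
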