EROFS workgroup management: functions referencing grp
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
	if (lockref_get_not_zero(&grp->lockref))
		return true;	/* fast path: count was already nonzero */

	spin_lock(&grp->lockref.lock);
	if (__lockref_is_dead(&grp->lockref)) {
		spin_unlock(&grp->lockref.lock);
		return false;	/* already being torn down */
	}
	/* 0 -> 1 transition: the workgroup leaves the shrinkable pool */
	if (!grp->lockref.count++)
		atomic_long_dec(&erofs_global_shrink_cnt);
	spin_unlock(&grp->lockref.lock);
	return true;
}
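The pattern above is lockref's classic "get if not zero": a lock-free fast path, then a locked slow path that may revive a zero count but must refuse a dead object. Below is a minimal userspace sketch of the same idea using C11 atomics and pthreads; struct obj, obj_get and OBJ_DEAD are hypothetical names invented for illustration, not EROFS or lockref API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define OBJ_DEAD (-128)	/* hypothetical sentinel, mirroring lockref's dead mark */

struct obj {
	pthread_mutex_t lock;
	atomic_int count;
};

static bool obj_get(struct obj *o)
{
	int c = atomic_load(&o->count);

	/* fast path: lock-free increment while the count stays positive */
	while (c > 0)
		if (atomic_compare_exchange_weak(&o->count, &c, c + 1))
			return true;

	/* slow path: decide under the lock whether to revive a zero count */
	pthread_mutex_lock(&o->lock);
	if (atomic_load(&o->count) == OBJ_DEAD) {
		pthread_mutex_unlock(&o->lock);
		return false;	/* object is on its way out */
	}
	atomic_fetch_add(&o->count, 1);
	pthread_mutex_unlock(&o->lock);
	return true;
}

As in the kernel code, the dead mark is only ever set with the lock held, so the locked check cannot race with teardown.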
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (!erofs_workgroup_get(grp)) {
			/* the workgroup is dying; drop the read side and retry */
			rcu_read_unlock();
			goto repeat;
		}
		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
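The same lookup-and-retry loop rendered in the userspace sketch, continuing the hypothetical struct obj above; a rwlock read side stands in for rcu_read_lock(), and table_lookup() is an assumed helper that returns the object at a slot, or NULL:

#include <sched.h>

extern pthread_rwlock_t tbl_lock;		/* stands in for RCU/XArray locking */
struct obj *table_lookup(unsigned long index);	/* assumed helper */

static struct obj *find_get(unsigned long index)
{
	struct obj *o;

	for (;;) {
		pthread_rwlock_rdlock(&tbl_lock);
		o = table_lookup(index);
		if (!o || obj_get(o)) {
			pthread_rwlock_unlock(&tbl_lock);
			return o;	/* miss, or a reference was taken */
		}
		/* found a dying object: back off and look again */
		pthread_rwlock_unlock(&tbl_lock);
		sched_yield();
	}
}

A failed obj_get() only means the object is mid-teardown while still visible in the table, exactly the window the kernel loop retries over.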
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	DBG_BUGON(grp->lockref.count < 1);
repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_NOFS);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (!erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		/* lost the race: reuse the workgroup already in the tree */
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}
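Insertion resolves races by compare-and-swap: the first caller to publish at an index wins, and a loser takes a reference on the winner's object and uses that instead. A compact sketch against the hypothetical table, write-locked this time; table_cmpxchg() is another assumed helper with xa_cmpxchg-like semantics (store new only if the slot holds old, returning the previous value):

struct obj *table_cmpxchg(unsigned long index, struct obj *old,
			  struct obj *new);	/* assumed helper */

/* caller already holds a reference on fresh; returns the in-table object */
static struct obj *insert_get(unsigned long index, struct obj *fresh)
{
	struct obj *pre;

	for (;;) {
		pthread_rwlock_wrlock(&tbl_lock);
		pre = table_cmpxchg(index, NULL, fresh);
		if (!pre || obj_get(pre)) {
			pthread_rwlock_unlock(&tbl_lock);
			return pre ? pre : fresh;
		}
		/* the slot holds a dying object; let it finish, then retry */
		pthread_rwlock_unlock(&tbl_lock);
		sched_yield();
	}
}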
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}
void erofs_workgroup_put(struct erofs_workgroup *grp)
{
	if (lockref_put_or_lock(&grp->lockref))
		return;	/* fast path: the count stayed above zero */

	DBG_BUGON(__lockref_is_dead(&grp->lockref));
	/* 1 -> 0 transition: the workgroup becomes reclaimable */
	if (grp->lockref.count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	--grp->lockref.count;
	spin_unlock(&grp->lockref.lock);
}
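Note the asymmetry with a plain refcount: dropping the last reference does not free the workgroup, it only flags it for the shrinker. The put side of the userspace sketch, with a global shrink_cnt mirroring erofs_global_shrink_cnt:

extern atomic_long shrink_cnt;	/* objects sitting at refcount zero */

static void obj_put(struct obj *o)
{
	int c = atomic_load(&o->count);

	/* fast path: pure decrement while the result stays nonzero */
	while (c > 1)
		if (atomic_compare_exchange_weak(&o->count, &c, c - 1))
			return;

	/* last reference: park the object for the reclaimer, don't free it */
	pthread_mutex_lock(&o->lock);
	if (atomic_fetch_sub(&o->count, 1) == 1)
		atomic_fetch_add(&shrink_cnt, 1);
	pthread_mutex_unlock(&o->lock);
}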
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	bool free = false;

	spin_lock(&grp->lockref.lock);
	if (grp->lockref.count)
		goto out;	/* still referenced, leave it alone */

	/* cached pages must be detached before the workgroup is erased */
	if (erofs_try_to_free_all_cached_pages(sbi, grp))
		goto out;

	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
	lockref_mark_dead(&grp->lockref);
	free = true;
out:
	spin_unlock(&grp->lockref.lock);
	if (free)
		__erofs_workgroup_free(grp);
	return free;
}
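Reclaim is the mirror image of the get slow path: the zero count is checked and the dead mark set under the same per-object lock, so a concurrent get either revives the object before the mark or fails after it. In the userspace sketch, with table_erase() and obj_free() as assumed helpers:

void table_erase(unsigned long index);	/* assumed helper */
void obj_free(struct obj *o);		/* assumed destructor */

/* called with the table write lock held, like the kernel's xa_lock */
static bool obj_try_release(struct obj *o, unsigned long index)
{
	bool freed = false;

	pthread_mutex_lock(&o->lock);
	if (atomic_load(&o->count))
		goto out;	/* somebody revived it; skip */
	table_erase(index);
	atomic_store(&o->count, OBJ_DEAD);	/* no revival past this point */
	freed = true;
out:
	pthread_mutex_unlock(&o->lock);
	if (freed) {
		atomic_fetch_sub(&shrink_cnt, 1);
		obj_free(o);
	}
	return freed;
}

One deliberate simplification: the write lock excludes readers here, so freeing immediately is safe, whereas the kernel's lock-free lookups force it to defer freeing through erofs_workgroup_free_rcu().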
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr_shrink)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}
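The reclaim walk in the userspace sketch, assuming a fixed-size array tbl[] as the table's backing store; unlike the kernel loop, it simply holds the write lock for the whole pass instead of dropping and retaking it around the bookkeeping:

#define TBL_SIZE 64
extern struct obj *tbl[TBL_SIZE];	/* assumed backing store */

static unsigned long shrink_table(unsigned long nr_shrink)
{
	unsigned long index, freed = 0;

	pthread_rwlock_wrlock(&tbl_lock);
	for (index = 0; index < TBL_SIZE && nr_shrink; index++) {
		struct obj *o = tbl[index];

		/* try to reclaim each object sitting at refcount zero */
		if (!o || !obj_try_release(o, index))
			continue;
		++freed;
		--nr_shrink;
	}
	pthread_rwlock_unlock(&tbl_lock);
	return freed;
}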