Lines Matching refs:ref

21 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
22 * issuing the appropriate barriers, and then marks the ref as shutting down so
23 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
24 * it's safe to drop the initial ref.
35 * and it's then safe to drop the initial ref with percpu_ref_put().
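The fragments above describe the shutdown scheme. A minimal sketch of how a user of this header might tie it together, assuming a hypothetical object struct foo with a release callback registered at init time (none of these names come from the header):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
        struct percpu_ref ref;
        /* ... payload ... */
};

/* Runs once the count hits 0, i.e. after shutdown started and the last
 * outstanding reference has been put. */
static void foo_release(struct percpu_ref *ref)
{
        struct foo *foo = container_of(ref, struct foo, ref);

        percpu_ref_exit(&foo->ref);     /* frees the percpu counter */
        kfree(foo);
}

static void foo_shutdown(struct foo *foo)
{
        /*
         * Collapses the ref into atomic mode, marks it dying and drops
         * the initial ref; foo_release() fires when the last remaining
         * reference is put.
         */
        percpu_ref_kill(&foo->ref);
}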
74 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
76 * with this flag, the ref will stay in atomic mode until
83 * Start dead w/ ref == 0 in atomic mode. Must be revived with
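A hedged sketch of how the init flags above might be passed; the "gate" object, its release stub and the function names are illustrative only:

#include <linux/percpu-refcount.h>

static struct percpu_ref gate_ref;

static void gate_release(struct percpu_ref *ref)
{
        /* last reference dropped */
}

static int gate_setup(void)
{
        /*
         * PERCPU_REF_INIT_DEAD: start with ref == 0 so nothing can take
         * a reference yet.  PERCPU_REF_INIT_ATOMIC would instead start
         * live but stay in atomic mode until percpu_ref_switch_to_percpu().
         */
        return percpu_ref_init(&gate_ref, gate_release,
                               PERCPU_REF_INIT_DEAD, GFP_KERNEL);
}

static void gate_open(void)
{
        /* Revive the dead-initialized ref; it starts over with ref == 1. */
        percpu_ref_reinit(&gate_ref);
}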
102 struct percpu_ref *ref;
107 * The low bit of the pointer indicates whether the ref is in percpu
121 int __must_check percpu_ref_init(struct percpu_ref *ref,
124 void percpu_ref_exit(struct percpu_ref *ref);
125 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
127 void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
128 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
129 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
131 void percpu_ref_resurrect(struct percpu_ref *ref);
132 void percpu_ref_reinit(struct percpu_ref *ref);
133 bool percpu_ref_is_zero(struct percpu_ref *ref);
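percpu_ref_init() is __must_check and can fail (typically when the underlying percpu allocation fails), with percpu_ref_exit() as its teardown counterpart. A hedged error-handling sketch; struct thing, thing_release() and do_more_setup() are placeholders, not names from the header:

#include <linux/percpu-refcount.h>

struct thing {
        struct percpu_ref ref;
};

static void thing_release(struct percpu_ref *ref)
{
        /* see the release sketch further up */
}

static int do_more_setup(struct thing *t)
{
        return 0;       /* placeholder for further init steps */
}

static int thing_setup(struct thing *t)
{
        int ret;

        ret = percpu_ref_init(&t->ref, thing_release, 0, GFP_KERNEL);
        if (ret)
                return ret;

        ret = do_more_setup(t);
        if (ret) {
                /*
                 * Nothing has taken a reference yet, so the ref can be
                 * torn down directly; percpu_ref_exit() releases what
                 * percpu_ref_init() allocated.
                 */
                percpu_ref_exit(&t->ref);
                return ret;
        }

        return 0;
}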
136 * percpu_ref_kill - drop the initial ref
137 * @ref: percpu_ref to kill
139 * Must be used to drop the initial ref on a percpu refcount; must be called
142 * Switches @ref into atomic mode before gathering up the percpu counters
143 * and dropping the initial ref.
147 static inline void percpu_ref_kill(struct percpu_ref *ref)
149 percpu_ref_kill_and_confirm(ref, NULL);
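percpu_ref_kill() is the common case; the _and_confirm variant additionally invokes a callback once the switch out of percpu mode has finished, i.e. once no CPU can still take a percpu-mode reference. A hedged sketch using a completion; struct ctx and its fields are hypothetical, and the release callback set up at init time is omitted:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct ctx {
        struct percpu_ref ref;
        struct completion kill_done;
};

/* Called once all percpu counters have been folded into atomic mode. */
static void ctx_kill_confirmed(struct percpu_ref *ref)
{
        struct ctx *ctx = container_of(ref, struct ctx, ref);

        complete(&ctx->kill_done);
}

static void ctx_begin_shutdown(struct ctx *ctx)
{
        init_completion(&ctx->kill_done);

        /* Drops the initial ref and schedules the confirmation above. */
        percpu_ref_kill_and_confirm(&ctx->ref, ctx_kill_confirmed);
        wait_for_completion(&ctx->kill_done);

        /* From here on, percpu_ref_tryget_live() reliably fails. */
}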
156 * branches as it can't assume that @ref->percpu_count is not NULL.
158 static inline bool __ref_is_percpu(struct percpu_ref *ref,
164 * The value of @ref->percpu_count_ptr is tested for
174 percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
191 * @ref: percpu_ref to get
196 * This function is safe to call as long as @ref is between init and exit.
198 static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
204 if (__ref_is_percpu(ref, &percpu_count))
207 atomic_long_add(nr, &ref->data->count);
214 * @ref: percpu_ref to get
218 * This function is safe to call as long as @ref is between init and exit.
220 static inline void percpu_ref_get(struct percpu_ref *ref)
222 percpu_ref_get_many(ref, 1);
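In other words, percpu_ref_get_many() takes @nr references in one go and percpu_ref_get() is the nr == 1 case; both are typically used when the caller already knows the ref cannot go away underneath it. A small hedged illustration (the item-queueing idea is invented for the example):

#include <linux/percpu-refcount.h>

/* Take one reference per queued item; each item puts its ref when done. */
static void items_queued(struct percpu_ref *ref, unsigned long nr)
{
        percpu_ref_get_many(ref, nr);
}

static void item_queued(struct percpu_ref *ref)
{
        percpu_ref_get(ref);    /* same as percpu_ref_get_many(ref, 1) */
}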
227 * @ref: percpu_ref to try-get
233 * This function is safe to call as long as @ref is between init and exit.
235 static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
243 if (__ref_is_percpu(ref, &percpu_count)) {
247 ret = atomic_long_add_unless(&ref->data->count, nr, 0);
257 * @ref: percpu_ref to try-get
262 * This function is safe to call as long as @ref is between init and exit.
264 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
266 return percpu_ref_tryget_many(ref, 1);
271 * @ref: percpu_ref to try-get
282 * This function is safe to call as long as @ref is between init and exit.
284 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
291 if (__ref_is_percpu(ref, &percpu_count)) {
294 } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
295 ret = atomic_long_inc_not_zero(&ref->data->count);
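The difference between the tryget flavours matters during shutdown: percpu_ref_tryget() succeeds as long as the count has not reached zero, even after percpu_ref_kill(), whereas percpu_ref_tryget_live() refuses new references once the ref is dying. A hedged lookup sketch; the xarray table and struct obj are illustrative, and RCU is what keeps the ref accessible across the tryget:

#include <linux/percpu-refcount.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>

struct obj {
        struct percpu_ref ref;
};

static struct obj *obj_lookup(struct xarray *table, unsigned long id)
{
        struct obj *obj;

        rcu_read_lock();
        obj = xa_load(table, id);
        if (obj && !percpu_ref_tryget_live(&obj->ref))
                obj = NULL;     /* shutdown has already started */
        rcu_read_unlock();

        return obj;             /* caller puts the ref when done */
}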
305 * @ref: percpu_ref to put
311 * This function is safe to call as long as @ref is between init and exit.
313 static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
319 if (__ref_is_percpu(ref, &percpu_count))
321 else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
322 ref->data->release(ref);
329 * @ref: percpu_ref to put
334 * This function is safe to call as long as @ref is between init and exit.
336 static inline void percpu_ref_put(struct percpu_ref *ref)
338 percpu_ref_put_many(ref, 1);
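And the matching put side: if this drops the last reference, the release callback passed to percpu_ref_init() runs from here, so the caller must not touch the object afterwards. A short hedged counterpart to the get sketch above:

#include <linux/percpu-refcount.h>

/* Drop the references taken when the items were queued. */
static void items_done(struct percpu_ref *ref, unsigned long nr)
{
        percpu_ref_put_many(ref, nr);
}

static void item_done(struct percpu_ref *ref)
{
        percpu_ref_put(ref);    /* same as percpu_ref_put_many(ref, 1) */
}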
343 * @ref: percpu_ref to test
345 * Returns %true if @ref is dying or dead.
347 * This function is safe to call as long as @ref is between init and exit
350 static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
352 return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
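A hedged example of the usual use of percpu_ref_is_dying(): rejecting new work once shutdown has begun. The check is only meaningful if the caller serializes against percpu_ref_kill(), for example under a shared lock as the assertion below hints; otherwise percpu_ref_tryget_live() is the race-free alternative. All names here are illustrative:

#include <linux/percpu-refcount.h>
#include <linux/spinlock.h>

struct engine {
        spinlock_t lock;        /* also held by the path calling kill */
        struct percpu_ref ref;
};

static int engine_submit(struct engine *e)
{
        lockdep_assert_held(&e->lock);

        if (percpu_ref_is_dying(&e->ref))
                return -ENODEV; /* shutdown already started */

        percpu_ref_get(&e->ref);
        /* ... queue the work; the worker does percpu_ref_put() ... */
        return 0;
}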