Lines Matching defs:ioc (io_context reference counting and icq management in the block layer, block/blk-ioc.c)

25  * @ioc: io_context to get
27 * Increment the reference count of @ioc.
29 static void get_io_context(struct io_context *ioc)
31 BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
32 atomic_long_inc(&ioc->refcount);
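
Taken together, lines 29-32 above are essentially the entire helper. A minimal reconstruction follows; like all the sketches added below, it is assembled from the matched lines and is not guaranteed to be verbatim source. The sketches assume the usual blk-ioc.c context: headers such as <linux/blkdev.h>, <linux/iocontext.h>, <linux/slab.h> and <linux/workqueue.h>, plus the file-local iocontext_cachep slab cache.

static void get_io_context(struct io_context *ioc)
{
        /* Getting a reference is only legal while at least one is held. */
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}

The BUG_ON() catches a get on an io_context whose last reference has already been dropped; callers must already own a reference (or otherwise pin the ioc) before calling this.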
43 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc and q locked for legacy queues.
59 static void ioc_exit_icqs(struct io_context *ioc)
63 spin_lock_irq(&ioc->lock);
64 hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
66 spin_unlock_irq(&ioc->lock);
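
Lines 59-66 walk the icq list under ioc->lock and exit each icq. A sketch of how the loop plausibly reads; the local variable and the per-entry call (named ioc_exit_icq() here) are assumptions filled in around the matched lines:

static void ioc_exit_icqs(struct io_context *ioc)
{
        struct io_cq *icq;

        spin_lock_irq(&ioc->lock);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
                ioc_exit_icq(icq);      /* assumed per-icq exit helper */
        spin_unlock_irq(&ioc->lock);
}

Exiting an icq notifies the I/O scheduler that it is going away; it does not unlink or free anything, which is left to the destroy/release paths below.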
70 * Release an icq. Called with ioc locked for blk-mq, and with both ioc and q locked for legacy queues.
75 struct io_context *ioc = icq->ioc;
79 lockdep_assert_held(&ioc->lock);
85 radix_tree_delete(&ioc->icq_tree, icq->q->id);
94 if (rcu_access_pointer(ioc->icq_hint) == icq)
95 rcu_assign_pointer(ioc->icq_hint, NULL);
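
Lines 75-95 tear down a single icq: it is removed from the per-ioc radix tree (keyed by the queue id) and, if the RCU-published lookup hint still points at it, the hint is cleared. A sketch of that portion; the function name ioc_destroy_icq() and the list unlinking are assumptions consistent with the matched lines:

static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;

        lockdep_assert_held(&ioc->lock);

        /* Unlink from the per-ioc index and list. */
        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);

        /*
         * The lookup hint is only an optimization.  Clear it if it still
         * points at the icq being destroyed; readers access it under RCU,
         * so it is updated with rcu_assign_pointer().
         */
        if (rcu_access_pointer(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        /* Exit notification and RCU-deferred freeing of @icq omitted here. */
}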
109 * Slow path for ioc release in put_io_context(). Performs double-lock
110 * dancing to unlink all icq's and then frees ioc.
114 struct io_context *ioc = container_of(work, struct io_context,
116 spin_lock_irq(&ioc->lock);
118 while (!hlist_empty(&ioc->icq_list)) {
119 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
131 spin_unlock(&ioc->lock);
133 spin_lock(&ioc->lock);
142 spin_unlock_irq(&ioc->lock);
144 kmem_cache_free(iocontext_cachep, ioc);
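
Lines 114-144 are the workqueue callback that performs the double-lock dancing described at lines 109-110: destroying an icq needs both the queue lock and the ioc lock, but this path naturally takes the ioc lock first, so when the queue lock cannot be taken directly the ioc lock is dropped and the two are re-acquired in queue-then-ioc order (the unlock/lock pair at lines 131 and 133). A sketch; the trylock fast path and the request_queue local are assumptions, and the extra protection the real code needs for the icq surviving the lock drop is only noted in a comment:

static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);

        spin_lock_irq(&ioc->lock);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(&q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                } else {
                        /*
                         * Drop ioc->lock and re-acquire both locks in
                         * queue-then-ioc order.  While ioc->lock is dropped
                         * the icq may be destroyed by another context, so
                         * the real code additionally pins it (for example
                         * via RCU and a "destroyed" flag) before retrying.
                         */
                        spin_unlock(&ioc->lock);
                        spin_lock(&q->queue_lock);
                        spin_lock(&ioc->lock);
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                }
        }

        spin_unlock_irq(&ioc->lock);

        kmem_cache_free(iocontext_cachep, ioc);
}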
151 static bool ioc_delay_free(struct io_context *ioc)
155 spin_lock_irqsave(&ioc->lock, flags);
156 if (!hlist_empty(&ioc->icq_list)) {
157 queue_work(system_power_efficient_wq, &ioc->release_work);
158 spin_unlock_irqrestore(&ioc->lock, flags);
161 spin_unlock_irqrestore(&ioc->lock, flags);
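
Lines 151-161 decide whether the ioc can be freed inline. put_io_context() may run with a queue_lock already held, while unlinking icqs needs the locks in the opposite order, so when icqs are still linked the teardown is punted to a workqueue and the function reports that it took ownership. A sketch with the two return statements, which do not contain "ioc" and are therefore absent from the listing, filled in as assumptions:

static bool ioc_delay_free(struct io_context *ioc)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        if (!hlist_empty(&ioc->icq_list)) {
                /* icqs still linked: let the release work free the ioc. */
                queue_work(system_power_efficient_wq, &ioc->release_work);
                spin_unlock_irqrestore(&ioc->lock, flags);
                return true;
        }
        spin_unlock_irqrestore(&ioc->lock, flags);
        return false;
}

A true return means ioc_release_fn() will free the ioc later; a false return tells the caller it is safe to free the ioc directly (see lines 208-209 below).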
166 * ioc_clear_queue - break any ioc association with the specified queue
179 * Other context won't hold ioc lock to wait for queue_lock; see ioc_release_fn() for details.
182 spin_lock(&icq->ioc->lock);
184 spin_unlock(&icq->ioc->lock);
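
Lines 166-184 are the queue-teardown side: every icq still attached to the queue is destroyed, taking the owning ioc's lock around each destruction. The comment at line 179 explains why nesting the ioc lock inside queue_lock here cannot deadlock against ioc_release_fn(). A sketch, assuming the loop runs under spin_lock_irq(&q->queue_lock), that the queue-side list head is q->icq_list, and using the ioc_destroy_icq() name from the earlier sketch:

void ioc_clear_queue(struct request_queue *q)
{
        spin_lock_irq(&q->queue_lock);
        while (!list_empty(&q->icq_list)) {
                struct io_cq *icq =
                        list_first_entry(&q->icq_list, struct io_cq, q_node);

                /*
                 * No other context holds an ioc lock while waiting for
                 * queue_lock, so this nesting order is safe.
                 */
                spin_lock(&icq->ioc->lock);
                ioc_destroy_icq(icq);
                spin_unlock(&icq->ioc->lock);
        }
        spin_unlock_irq(&q->queue_lock);
}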
189 static inline void ioc_exit_icqs(struct io_context *ioc)
192 static inline bool ioc_delay_free(struct io_context *ioc)
200 * @ioc: io_context to put
202 * Decrement reference count of @ioc and release it if the count reaches zero.
205 void put_io_context(struct io_context *ioc)
207 BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
208 if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
209 kmem_cache_free(iocontext_cachep, ioc);
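
Lines 205-209 are the complete put path: drop a reference and, when it was the last one, free the ioc directly unless ioc_delay_free() took ownership by queueing the release work. Reconstructed from the matched lines (not guaranteed verbatim):

void put_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
                kmem_cache_free(iocontext_cachep, ioc);
}

Exactly one of the two paths performs the kmem_cache_free(): either here on the fast path, or in ioc_release_fn() at line 144 after the queued work has destroyed the remaining icqs.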
216 struct io_context *ioc;
219 ioc = task->io_context;
223 if (atomic_dec_and_test(&ioc->active_ref)) {
224 ioc_exit_icqs(ioc);
225 put_io_context(ioc);
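
Lines 216-225 are the task-exit path (exit_io_context() in mainline; the name itself is not among the matched lines): the io_context is detached from the task, and when the last active user is gone the icqs are exited and the regular reference is dropped. A sketch, assuming the detach happens under task_lock() as is conventional for task->io_context:

void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        if (atomic_dec_and_test(&ioc->active_ref)) {
                ioc_exit_icqs(ioc);
                put_io_context(ioc);
        }
}

active_ref tracks tasks actively sharing the context (via CLONE_IO, lines 298-310), while refcount (lines 205-209) is the general reference count that keeps the structure itself alive.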
231 struct io_context *ioc;
233 ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
235 if (unlikely(!ioc))
238 atomic_long_set(&ioc->refcount, 1);
239 atomic_set(&ioc->active_ref, 1);
241 spin_lock_init(&ioc->lock);
242 INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
243 INIT_HLIST_HEAD(&ioc->icq_list);
244 INIT_WORK(&ioc->release_work, ioc_release_fn);
246 ioc->ioprio = IOPRIO_DEFAULT;
248 return ioc;
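
Lines 231-248 show the allocator: a zeroed slab object, both counters initialized to 1 (one general reference, one active task), the icq bookkeeping initialized, and the default I/O priority set. A sketch; the signature and the CONFIG_BLK_ICQ guard around the icq fields are assumptions (such a guard would explain the inline stubs at lines 189-192):

static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return NULL;

        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
        ioc->ioprio = IOPRIO_DEFAULT;

        return ioc;
}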
271 struct io_context *ioc;
275 ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
276 if (!ioc)
281 kmem_cache_free(iocontext_cachep, ioc);
285 kmem_cache_free(iocontext_cachep, ioc);
287 task->io_context = ioc;
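
Lines 271-287 show the "allocate outside the lock, then recheck under the lock" pattern used when attaching an io_context to another task (this is the set_task_ioprio() path in mainline; the name is not among the matched lines). The two kmem_cache_free() calls at lines 281 and 285 are the two ways the fresh allocation can become unnecessary. A sketch with the permission and security checks omitted; the PF_EXITING test and the task_lock() usage are assumptions consistent with those two free calls:

int set_task_ioprio(struct task_struct *task, int ioprio)
{
        /* Permission and security checks omitted from this sketch. */

        task_lock(task);
        if (unlikely(!task->io_context)) {
                struct io_context *ioc;

                /* Allocate without holding task_lock, then recheck. */
                task_unlock(task);
                ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
                if (!ioc)
                        return -ENOMEM;

                task_lock(task);
                if (task->flags & PF_EXITING) {
                        /* Task is exiting; drop the unused allocation. */
                        kmem_cache_free(iocontext_cachep, ioc);
                        goto out;
                }
                if (task->io_context)
                        /* Lost a race; someone else installed one. */
                        kmem_cache_free(iocontext_cachep, ioc);
                else
                        task->io_context = ioc;
        }
        task->io_context->ioprio = ioprio;
out:
        task_unlock(task);
        return 0;
}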
298 struct io_context *ioc = current->io_context;
304 atomic_inc(&ioc->active_ref);
305 tsk->io_context = ioc;
306 } else if (ioprio_valid(ioc->ioprio)) {
310 tsk->io_context->ioprio = ioc->ioprio;
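
Lines 298-310 are the fork path (matching mainline's __copy_io(); the name is not among the matched lines). With CLONE_IO the child shares the parent's io_context and just gains another active reference; otherwise, if the parent has a valid I/O priority, the child gets a private io_context that inherits only the priority. A sketch; the clone_flags test, the allocation and the return statements are assumptions filled in around the matched lines:

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
        struct io_context *ioc = current->io_context;

        if (clone_flags & CLONE_IO) {
                /* Share the parent's context; one more active user. */
                atomic_inc(&ioc->active_ref);
                tsk->io_context = ioc;
        } else if (ioprio_valid(ioc->ioprio)) {
                /* Private context that only inherits the I/O priority. */
                tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
                if (!tsk->io_context)
                        return -ENOMEM;
                tsk->io_context->ioprio = ioc->ioprio;
        }

        return 0;
}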
318 * ioc_lookup_icq - lookup io_cq from ioc
321 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called with @q->queue_lock held.
326 struct io_context *ioc = current->io_context;
332 * icq's are indexed from @ioc using a radix tree and a hint pointer, both protected by RCU; all removals are done
334 * holding both q and ioc locks, and we're holding q lock - if we find an icq which points to us, it's guaranteed to be valid.
338 icq = rcu_dereference(ioc->icq_hint);
342 icq = radix_tree_lookup(&ioc->icq_tree, q->id);
344 rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
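
Lines 326-344 are the lockless lookup: try the RCU-protected hint pointer first, fall back to the radix tree keyed by q->id, and refresh the hint on a tree hit. The hint update is deliberately racy, as the in-line comment at line 344 notes, because a stale hint only costs an extra tree lookup. A sketch; the rcu_read_lock()/rcu_read_unlock() pair and the icq->q == q checks are assumptions filled in around the matched lines:

struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq;

        lockdep_assert_held(&q->queue_lock);

        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}

Holding q->queue_lock is what makes a matching icq safe to return: removals take both the q and ioc locks (lines 332-334), so an icq found here cannot disappear while the caller still holds the queue lock.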
357 * Make sure an io_cq linking @ioc and @q exists. If the icq doesn't exist yet, it will be created and linked.
360 * The caller is responsible for ensuring @ioc won't go away and @q is alive and will stay alive until this function returns.
365 struct io_context *ioc = current->io_context;
380 icq->ioc = ioc;
385 /* lock both q and ioc and try to link @icq */
387 spin_lock(&ioc->lock);
389 if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
390 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
401 spin_unlock(&ioc->lock);
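
Lines 365-401 create a new icq and link it into both indexes, the per-ioc radix tree/hlist and the per-queue list, while holding the queue lock and the ioc lock. If radix_tree_insert() fails (typically because another context linked an icq first), the fresh allocation is dropped and the existing icq is looked up instead. A sketch of the linking step; the function name ioc_create_icq(), the per-elevator icq_cache allocation and the omission of radix tree preloading and the elevator's init_icq() callback are assumptions or simplifications:

static struct io_cq *ioc_create_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq;

        icq = kmem_cache_alloc_node(q->elevator->type->icq_cache,
                                    GFP_ATOMIC | __GFP_ZERO, q->node);
        if (!icq)
                return NULL;

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(&q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
        } else {
                /* Insert failed, usually a lost race: reuse the linked icq. */
                kmem_cache_free(q->elevator->type->icq_cache, icq);
                icq = ioc_lookup_icq(q);
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(&q->queue_lock);
        return icq;
}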
409 struct io_context *ioc = current->io_context;
412 if (unlikely(!ioc)) {
413 ioc = alloc_io_context(GFP_ATOMIC, q->node);
414 if (!ioc)
419 kmem_cache_free(iocontext_cachep, ioc);
420 ioc = current->io_context;
422 current->io_context = ioc;
425 get_io_context(ioc);
428 get_io_context(ioc);
438 put_io_context(ioc);
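
Lines 409-438 tie the pieces together for request submission: make sure current has an io_context (allocating one with GFP_ATOMIC and handling the race where another path installed one first, lines 419-420), take a reference, then look up or create the icq for @q, dropping the reference again on failure (line 438). A sketch; the function name ioc_find_get_icq(), the task_lock() around the install and the lookup/create calls are assumptions consistent with the helpers sketched above:

struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq = NULL;

        if (unlikely(!ioc)) {
                ioc = alloc_io_context(GFP_ATOMIC, q->node);
                if (!ioc)
                        return NULL;

                task_lock(current);
                if (current->io_context) {
                        /* Raced with another installer; use theirs. */
                        kmem_cache_free(iocontext_cachep, ioc);
                        ioc = current->io_context;
                } else {
                        current->io_context = ioc;
                }
                get_io_context(ioc);
                task_unlock(current);
        } else {
                get_io_context(ioc);

                spin_lock_irq(&q->queue_lock);
                icq = ioc_lookup_icq(q);
                spin_unlock_irq(&q->queue_lock);
        }

        if (!icq) {
                icq = ioc_create_icq(q);
                if (!icq) {
                        put_io_context(ioc);
                        return NULL;
                }
        }
        return icq;
}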