Lines matching refs: l_ctx
110 * @l_ctx: nfs_lock_context with io_counter to use
116 nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
118 return wait_var_event_killable(&l_ctx->io_count,
119 !atomic_read(&l_ctx->io_count));
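
nfs_iocounter_wait() blocks, killably, until every I/O started against this lock context has drained: io_count is incremented when a request is created against the context (line 451 below), and the final decrement wakes the sleeper with wake_up_var() on the same address (line 569). A minimal sketch of that wait_var_event_killable()/wake_up_var() handshake, with a hypothetical counter standing in for l_ctx->io_count:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t pending = ATOMIC_INIT(0);

/* Waiter: returns 0 once pending reaches zero, -ERESTARTSYS if killed. */
static int drain_pending(void)
{
        return wait_var_event_killable(&pending, !atomic_read(&pending));
}

/* Completion side: only the final decrement performs the wakeup. */
static void complete_one(void)
{
        if (atomic_dec_and_test(&pending))
                wake_up_var(&pending);
}
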
126 * @l_ctx: nfs_lock_context with io_counter to check
132 nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
134 struct inode *inode = d_inode(l_ctx->open_context->dentry);
137 if (atomic_read(&l_ctx->io_count) > 0) {
142 if (atomic_read(&l_ctx->io_count) == 0) {
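
nfs_async_iocounter_wait() is the non-blocking variant for RPC tasks: when io_count is still nonzero it parks the task on the server's uoc_rpcwaitq, then re-reads the counter so that a final decrement racing with the enqueue wakes the task immediately instead of stranding it. One way a prepare callback might consume the return value; my_prepare() and struct my_data are hypothetical:

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

struct my_data {
        struct nfs_lock_context *l_ctx;
};

/* Hypothetical rpc_call_prepare callback: defer this task until all
 * I/O against the lock context has drained. */
static void my_prepare(struct rpc_task *task, void *calldata)
{
        struct my_data *d = calldata;

        if (nfs_async_iocounter_wait(task, d->l_ctx))
                return;         /* parked on uoc_rpcwaitq; re-run on wakeup */

        rpc_call_start(task);   /* counter already zero: proceed now */
}
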
435 __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
440 struct nfs_open_context *ctx = l_ctx->open_context;
449 req->wb_lock_context = l_ctx;
450 refcount_inc(&l_ctx->count);
451 atomic_inc(&l_ctx->io_count);
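
__nfs_create_request() pins the lock context in two senses: refcount_inc(&l_ctx->count) keeps the nfs_lock_context object itself alive for the lifetime of the request, while atomic_inc(&l_ctx->io_count) tells the iocounter waiters above that one more I/O is in flight. The same pairing restated as a hypothetical helper, with each counter's role spelled out:

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>

/* Hypothetical helper naming the two counters taken per request. */
static void req_pin_lock_context(struct nfs_page *req,
                                 struct nfs_lock_context *l_ctx)
{
        req->wb_lock_context = l_ctx;
        refcount_inc(&l_ctx->count);    /* object lifetime: dropped on put */
        atomic_inc(&l_ctx->io_count);   /* in-flight I/O: gates the waits above */
}
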
484 struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
487 if (IS_ERR(l_ctx))
488 return ERR_CAST(l_ctx);
489 ret = __nfs_create_request(l_ctx, page, offset, offset, count);
492 nfs_put_lock_context(l_ctx);
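
These lines appear to come from nfs_create_request(), which brackets the call with nfs_get_lock_context()/nfs_put_lock_context(): the get returns the context with an elevated refcount (or an ERR_PTR), and the temporary reference can be dropped on exit because __nfs_create_request() took its own at line 450. An illustrative caller, assuming a page to be covered from offset 0; make_one_request() is not a real kernel function:

#include <linux/err.h>
#include <linux/nfs_page.h>

/* Illustrative caller: build a request covering bytes 0..len-1 of page. */
static int make_one_request(struct nfs_open_context *ctx, struct page *page,
                            unsigned int len, struct nfs_page **reqp)
{
        struct nfs_page *req = nfs_create_request(ctx, page, 0, len);

        /* Failures (e.g. -ENOMEM from the lock-context lookup) come back
         * as ERR_PTR values, never NULL. */
        if (IS_ERR(req))
                return PTR_ERR(req);
        *reqp = req;
        return 0;
}
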
560 struct nfs_lock_context *l_ctx = req->wb_lock_context;
567 if (l_ctx != NULL) {
568 if (atomic_dec_and_test(&l_ctx->io_count)) {
569 wake_up_var(&l_ctx->io_count);
570 ctx = l_ctx->open_context;
574 nfs_put_lock_context(l_ctx);
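
The release side mirrors creation exactly: atomic_dec_and_test() guarantees exactly one caller observes the drop to zero, and that caller wakes the synchronous sleeper via wake_up_var() and, through the open context fetched at line 570, can also kick any tasks parked by nfs_async_iocounter_wait(); nfs_put_lock_context() then drops the reference taken at creation. A sketch of how the elided lines likely fit together:

/* Assumed shape of the elided release body, following the pattern in
 * mainline fs/nfs/pagelist.c; the flag test and queue wake are the
 * counterpart of nfs_async_iocounter_wait() above. */
if (l_ctx != NULL) {
        if (atomic_dec_and_test(&l_ctx->io_count)) {
                wake_up_var(&l_ctx->io_count);  /* unblocks nfs_iocounter_wait() */
                ctx = l_ctx->open_context;
                if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
                        rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
        }
        nfs_put_lock_context(l_ctx);    /* pairs with refcount_inc() at line 450 */
        req->wb_lock_context = NULL;
}
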