Lines matching refs:ctx in net/sunrpc/auth_gss/auth_gss.c

97 gss_get_ctx(struct gss_cl_ctx *ctx)
99 refcount_inc(&ctx->count);
100 return ctx;
104 gss_put_ctx(struct gss_cl_ctx *ctx)
106 if (refcount_dec_and_test(&ctx->count))
107 gss_free_ctx(ctx);
116 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
122 gss_get_ctx(ctx);
123 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
133 struct gss_cl_ctx *ctx = NULL;
136 ctx = rcu_dereference(gss_cred->gc_ctx);
137 if (ctx)
138 gss_get_ctx(ctx);
140 return ctx;
146 struct gss_cl_ctx *ctx;
148 ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
149 if (ctx != NULL) {
150 ctx->gc_proc = RPC_GSS_PROC_DATA;
151 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
152 spin_lock_init(&ctx->gc_seq_lock);
153 refcount_set(&ctx->count,1);
155 return ctx;
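
The matches at lines 97-155 above outline the context lifecycle: a context carries a refcount, a cred publishes it through an RCU-protected pointer, and readers pin it with an extra reference before use. Below is a minimal, self-contained sketch of that pattern; the demo_* names are illustrative stand-ins, not the auth_gss.c definitions, and kfree_rcu() here stands in for the call_rcu() teardown shown further down.

#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ctx {
	refcount_t	count;
	struct rcu_head	rcu;
};

struct demo_cred {
	struct demo_ctx __rcu *ctx;
};

static struct demo_ctx *demo_get_ctx(struct demo_ctx *ctx)
{
	refcount_inc(&ctx->count);		/* caller already holds a reference */
	return ctx;
}

static void demo_put_ctx(struct demo_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->count))
		kfree_rcu(ctx, rcu);		/* free only after an RCU grace period */
}

static struct demo_ctx *demo_alloc_ctx(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOFS);

	if (ctx)
		refcount_set(&ctx->count, 1);	/* initial reference */
	return ctx;
}

/* Publish: take a reference on behalf of the cred, then make it visible. */
static void demo_cred_set_ctx(struct demo_cred *cred, struct demo_ctx *ctx)
{
	demo_get_ctx(ctx);
	rcu_assign_pointer(cred->ctx, ctx);
}

/* Read side: pin the context with a reference before leaving the RCU section. */
static struct demo_ctx *demo_cred_get_ctx(struct demo_cred *cred)
{
	struct demo_ctx *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(cred->ctx);
	if (ctx)
		demo_get_ctx(ctx);
	rcu_read_unlock();
	return ctx;
}
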
160 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
178 ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
185 ctx->gc_win = window_size;
186 /* gssd signals an error by passing ctx->gc_win = 0: */
187 if (ctx->gc_win == 0) {
199 p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
211 ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
225 p = simple_get_netobj(q, end, &ctx->gc_acceptor);
229 trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
230 ctx->gc_acceptor.len, ctx->gc_acceptor.data);
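
Lines 160-230 show gss_fill_context() parsing the blob gssd writes down the upcall pipe: a lifetime, the sequence window, the wire context handle, the imported security context, and the acceptor name. The handle and acceptor are read as netobjs, a native-endian length followed by that many opaque bytes. A hedged sketch of such a bounds-checked netobj parser follows; the simple_get_netobj() calls above do this job in the real file, while the demo_* names and error choices below are illustrative.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_netobj {
	unsigned int	len;
	u8		*data;
};

/* Copy @count raw bytes out of [p, end); return the new cursor or an ERR_PTR. */
static const void *demo_get_bytes(const void *p, const void *end,
				  void *res, size_t count)
{
	const void *q = p + count;

	if (q > end || q < p)			/* past the buffer, or wrapped */
		return ERR_PTR(-EFAULT);
	memcpy(res, p, count);
	return q;
}

/* A netobj is a native-endian length followed by that many opaque bytes. */
static const void *demo_get_netobj(const void *p, const void *end,
				   struct demo_netobj *obj)
{
	unsigned int len;

	p = demo_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	if (len > (size_t)(end - p))		/* length field exceeds the payload */
		return ERR_PTR(-EFAULT);
	obj->data = kmemdup(p, len, GFP_NOFS);
	if (!obj->data)
		return ERR_PTR(-ENOMEM);
	obj->len = len;
	return p + len;
}
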
252 struct gss_cl_ctx *ctx;
289 if (gss_msg->ctx != NULL)
290 gss_put_ctx(gss_msg->ctx);
360 if (gss_msg->ctx == NULL)
363 gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
606 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
662 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
672 if (gss_msg->ctx) {
674 gss_cred_set_ctx(cred, gss_msg->ctx);
712 struct gss_cl_ctx *ctx;
742 ctx = gss_alloc_context();
743 if (ctx == NULL)
757 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
779 gss_msg->ctx = gss_get_ctx(ctx);
788 gss_put_ctx(ctx);
1241 struct gss_cl_ctx *ctx =
1251 rcu_assign_pointer(new->gc_ctx, ctx);
1252 gss_get_ctx(ctx);
1268 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1274 ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1290 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1292 gss_delete_sec_context(&ctx->gc_gss_ctx);
1293 kfree(ctx->gc_wire_ctx.data);
1294 kfree(ctx->gc_acceptor.data);
1295 kfree(ctx);
1301 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1302 gss_do_free_ctx(ctx);
1306 gss_free_ctx(struct gss_cl_ctx *ctx)
1308 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
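
Lines 1290-1308 free a context only after an RCU grace period. Because teardown does more than freeing the struct itself (it also deletes the GSS security context and frees the embedded buffers), the code uses call_rcu() with a callback rather than kfree_rcu(). A minimal sketch of that shape, with illustrative names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_sec_ctx {
	void		*mech_ctx;	/* stands in for gc_gss_ctx */
	void		*handle;	/* stands in for gc_wire_ctx.data, gc_acceptor.data */
	struct rcu_head	rcu;
};

/* Runs only after a grace period, so no RCU reader can still see the context. */
static void demo_free_ctx_callback(struct rcu_head *head)
{
	struct demo_sec_ctx *ctx = container_of(head, struct demo_sec_ctx, rcu);

	kfree(ctx->mech_ctx);		/* the real code calls gss_delete_sec_context() here */
	kfree(ctx->handle);
	kfree(ctx);
}

static void demo_free_ctx(struct demo_sec_ctx *ctx)
{
	call_rcu(&ctx->rcu, demo_free_ctx_callback);
}
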
1329 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1334 if (ctx)
1335 gss_put_ctx(ctx);
1405 struct gss_cl_ctx *ctx;
1410 ctx = rcu_dereference(gss_cred->gc_ctx);
1411 if (!ctx)
1414 len = ctx->gc_acceptor.len;
1426 ctx = rcu_dereference(gss_cred->gc_ctx);
1428 /* did the ctx disappear or was it replaced by one with no acceptor? */
1429 if (!ctx || !ctx->gc_acceptor.len) {
1435 acceptor = &ctx->gc_acceptor;
1463 struct gss_cl_ctx *ctx;
1468 ctx = rcu_dereference(gss_cred->gc_ctx);
1469 if (!ctx || time_after(timeout, ctx->gc_expiry))
1480 struct gss_cl_ctx *ctx;
1487 ctx = rcu_dereference(gss_cred->gc_ctx);
1488 if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
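
Line 178 converts the relative lifetime from gssd into an absolute jiffies deadline, and line 1488 later tests that deadline against jiffies (line 1469 against a caller-supplied deadline) to decide whether the cached context is still usable. A small sketch of the arithmetic, assuming a seconds-based lifetime as in the matches above:

#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_expiring_ctx {
	unsigned long	expiry;		/* absolute deadline in jiffies, like gc_expiry */
};

/* Turn a relative lifetime in seconds into an absolute jiffies deadline. */
static void demo_set_expiry(struct demo_expiring_ctx *ctx, unsigned int lifetime_secs)
{
	ctx->expiry = jiffies + (unsigned long)lifetime_secs * HZ;
}

/* The context stays usable only until the deadline passes. */
static bool demo_ctx_is_valid(const struct demo_expiring_ctx *ctx)
{
	return !time_after(jiffies, ctx->expiry);
}
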
1521 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1532 ctx->gc_wire_ctx.len);
1538 spin_lock(&ctx->gc_seq_lock);
1539 req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1540 spin_unlock(&ctx->gc_seq_lock);
1546 *p++ = cpu_to_be32(ctx->gc_proc);
1549 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1565 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1574 gss_put_ctx(ctx);
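
Lines 1538-1540 hand each request a GSS sequence number under gc_seq_lock, capping at MAXSEQ, and lines 151-152 show the counter starting at 1 with its lock initialised. A minimal sketch of that allocator; DEMO_MAXSEQ and the demo_* names are illustrative stand-ins:

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_MAXSEQ	0x80000000U	/* illustrative cap, standing in for MAXSEQ */

struct demo_seq {
	spinlock_t	lock;		/* like gc_seq_lock */
	u32		seq;		/* like gc_seq */
};

static void demo_seq_init(struct demo_seq *s)
{
	spin_lock_init(&s->lock);
	s->seq = 1;			/* start at 1; line 151 notes some servers reject 0 */
}

/*
 * Hand out the next sequence number.  Once the cap is hit the same sentinel
 * keeps being returned, which a caller can treat as "refresh the context".
 */
static u32 demo_next_seqno(struct demo_seq *s)
{
	u32 seqno;

	spin_lock(&s->lock);
	seqno = (s->seq < DEMO_MAXSEQ) ? s->seq++ : DEMO_MAXSEQ;
	spin_unlock(&s->lock);
	return seqno;
}
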
1665 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1694 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1706 gss_put_ctx(ctx);
1720 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1748 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1813 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1856 maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1892 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1896 if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1908 status = gss_wrap_req_integ(cred, ctx, task, xdr);
1911 status = gss_wrap_req_priv(cred, ctx, task, xdr);
1917 gss_put_ctx(ctx);
1963 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2009 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
2035 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2051 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
2066 gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
2067 2 + ctx->gc_gss_ctx->slack);
2092 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2096 if (!ctx)
2099 if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2102 seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2106 seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2113 win = ctx->gc_win;
2118 gss_put_ctx(ctx);
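
Lines 2092-2118 decide whether a request being retransmitted still falls inside the acceptor's sequence window: the highest sequence number sent so far is advanced lock-free with cmpxchg(), and older numbers pass only if they lie within gc_win of it. A hedged sketch of that window test using the usual signed-difference serial comparison; the names are illustrative:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Serial-number arithmetic: true if @seq_new is ahead of @seq_old modulo 2^32. */
static bool demo_seq_is_newer(u32 seq_new, u32 seq_old)
{
	return (s32)(seq_new - seq_old) > 0;
}

struct demo_seq_window {
	u32	seq_xmit;		/* highest sequence number transmitted so far */
	u32	win;			/* acceptor's sequence window, like gc_win */
};

/*
 * Record @seqno as transmitted and report whether it still falls inside the
 * acceptor's window.  cmpxchg() lets concurrent senders race to advance
 * seq_xmit without a lock; the loop retries when another sender won the race.
 */
static bool demo_seqno_in_window(struct demo_seq_window *w, u32 seqno)
{
	u32 seq_xmit = READ_ONCE(w->seq_xmit);

	while (demo_seq_is_newer(seqno, seq_xmit)) {
		u32 tmp = seq_xmit;

		seq_xmit = cmpxchg(&w->seq_xmit, tmp, seqno);
		if (seq_xmit == tmp)
			return true;	/* we advanced the high-water mark */
	}

	/* Behind the high-water mark: acceptable only while inside the window. */
	return !demo_seq_is_newer(seqno, seq_xmit - w->win);
}
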
2131 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2134 if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2141 status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2144 status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2153 gss_put_ctx(ctx);