Lines matching refs:cb
373 const struct nfsd4_callback *cb,
376 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
417 struct nfsd4_callback *cb)
419 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
455 cb->cb_seq_status = status;
463 struct nfsd4_callback *cb)
467 if (cb->cb_clp->cl_minorversion == 0)
470 status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
471 if (unlikely(status || cb->cb_seq_status))
474 return decode_cb_sequence4resok(xdr, cb);
501 const struct nfsd4_callback *cb = data;
502 const struct nfs4_delegation *dp = cb_to_delegation(cb);
504 .ident = cb->cb_clp->cl_cb_ident,
505 .minorversion = cb->cb_clp->cl_minorversion,
509 encode_cb_sequence4args(xdr, cb, &hdr);
521 const struct nfsd4_callback *cb = data;
524 .ident = cb->cb_clp->cl_cb_ident,
525 .minorversion = cb->cb_clp->cl_minorversion,
528 ra = container_of(cb, struct nfsd4_cb_recall_any, ra_cb);
530 encode_cb_sequence4args(xdr, cb, &hdr);
557 struct nfsd4_callback *cb = data;
565 status = decode_cb_sequence4res(xdr, cb);
566 if (unlikely(status || cb->cb_seq_status))
569 return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
580 struct nfsd4_callback *cb = data;
587 status = decode_cb_sequence4res(xdr, cb);
588 if (unlikely(status || cb->cb_seq_status))
590 status = decode_cb_op_status(xdr, OP_CB_RECALL_ANY, &cb->cb_status);
651 const struct nfsd4_callback *cb = data;
653 container_of(cb, struct nfs4_layout_stateid, ls_recall);
656 .minorversion = cb->cb_clp->cl_minorversion,
660 encode_cb_sequence4args(xdr, cb, &hdr);
669 struct nfsd4_callback *cb = data;
677 status = decode_cb_sequence4res(xdr, cb);
678 if (unlikely(status || cb->cb_seq_status))
681 return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
698 const struct nfsd4_callback *cb = data;
700 container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
704 .minorversion = cb->cb_clp->cl_minorversion,
712 encode_cb_sequence4args(xdr, cb, &hdr);
727 struct nfsd4_callback *cb = data;
735 status = decode_cb_sequence4res(xdr, cb);
736 if (unlikely(status || cb->cb_seq_status))
739 return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
803 const struct nfsd4_callback *cb = data;
805 container_of(cb, struct nfsd4_cb_offload, co_cb);
808 .minorversion = cb->cb_clp->cl_minorversion,
812 encode_cb_sequence4args(xdr, cb, &hdr);
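
Every encode fragment above (CB_RECALL, CB_RECALL_ANY, CB_LAYOUTRECALL, CB_NOTIFY_LOCK, CB_OFFLOAD) follows the same shape: recover the operation-specific structure with container_of(), fill a compound header from the client's cl_cb_ident and cl_minorversion, and emit CB_SEQUENCE before the operation's own arguments. A minimal sketch of that shared shape; the wrapper name is hypothetical and encode_cb_compound4args() is an assumed helper that does not appear in the matches:

/*
 * Sketch only: the header fields and encode_cb_sequence4args() are taken
 * from the matched lines; the wrapper name and encode_cb_compound4args()
 * are assumptions.
 */
static void nfsd4_cb_encode_common(struct xdr_stream *xdr,
				   const struct nfsd4_callback *cb,
				   struct nfs4_cb_compound_hdr *hdr)
{
	hdr->ident = cb->cb_clp->cl_cb_ident;
	hdr->minorversion = cb->cb_clp->cl_minorversion;

	encode_cb_compound4args(xdr, hdr);	/* assumed helper */
	encode_cb_sequence4args(xdr, cb, hdr);	/* CB_SEQUENCE, v4.1+ only */
	/* caller encodes the operation-specific arguments next */
}
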
821 struct nfsd4_callback *cb = data;
829 status = decode_cb_sequence4res(xdr, cb);
830 if (unlikely(status || cb->cb_seq_status))
833 return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
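
The decode fragments mirror that pattern in reverse: decode the CB_SEQUENCE result first, return early if either the XDR decode or the sequence status failed, then decode the status of the requested operation into cb->cb_status. A sketch, with a hypothetical wrapper around the two helpers matched above:

/* Sketch only; decode_cb_sequence4res() and decode_cb_op_status() are
 * the helpers visible in the matches, the wrapper is hypothetical. */
static int nfsd4_cb_decode_common(struct xdr_stream *xdr,
				  struct nfsd4_callback *cb,
				  u32 expected_op)
{
	int status;

	status = decode_cb_sequence4res(xdr, cb);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_op_status(xdr, expected_op, &cb->cb_status);
}
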
912 static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
914 return queue_work(callback_wq, &cb->cb_work);
1063 /* XXX: release method to ensure we set the cb channel down if
1100 static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
1102 struct nfs4_client *clp = cb->cb_clp;
1104 if (!cb->cb_holds_slot &&
1114 cb->cb_holds_slot = true;
1118 static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
1120 struct nfs4_client *clp = cb->cb_clp;
1122 if (cb->cb_holds_slot) {
1123 cb->cb_holds_slot = false;
1129 static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
1131 struct nfs4_client *clp = cb->cb_clp;
1133 nfsd41_cb_release_slot(cb);
1134 if (cb->cb_ops && cb->cb_ops->release)
1135 cb->cb_ops->release(cb);
1145 struct nfsd4_callback *cb = calldata;
1146 struct nfs4_client *clp = cb->cb_clp;
1153 cb->cb_seq_status = 1;
1154 cb->cb_status = 0;
1155 if (minorversion && !nfsd41_cb_get_slot(cb, task))
1160 static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
1162 struct nfs4_client *clp = cb->cb_clp;
1182 if (!cb->cb_holds_slot)
1185 switch (cb->cb_seq_status) {
1201 nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
1219 nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
1221 cb->cb_seq_status);
1224 nfsd41_cb_release_slot(cb);
1239 cb->cb_need_restart = true;
1246 struct nfsd4_callback *cb = calldata;
1247 struct nfs4_client *clp = cb->cb_clp;
1249 if (!nfsd4_cb_sequence_done(task, cb))
1252 if (cb->cb_status) {
1254 task->tk_status = cb->cb_status;
1257 switch (cb->cb_ops->done(cb, task)) {
1277 struct nfsd4_callback *cb = calldata;
1279 if (cb->cb_need_restart)
1280 nfsd4_queue_cb(cb);
1282 nfsd41_destroy_cb(cb);
1343 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1346 struct nfs4_client *clp = cb->cb_clp;
1374 memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
1395 struct nfsd4_callback *cb =
1397 struct nfs4_client *clp = cb->cb_clp;
1401 if (cb->cb_need_restart) {
1402 cb->cb_need_restart = false;
1404 if (cb->cb_ops && cb->cb_ops->prepare)
1405 cb->cb_ops->prepare(cb);
1409 nfsd4_process_cb_update(cb);
1414 nfsd41_destroy_cb(cb);
1421 if (!cb->cb_ops && clp->cl_minorversion) {
1423 nfsd41_destroy_cb(cb);
1427 cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1429 rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
1430 cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
1433 void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1436 cb->cb_clp = clp;
1437 cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
1438 cb->cb_msg.rpc_argp = cb;
1439 cb->cb_msg.rpc_resp = cb;
1440 cb->cb_ops = ops;
1441 INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1442 cb->cb_seq_status = 1;
1443 cb->cb_status = 0;
1444 cb->cb_need_restart = false;
1445 cb->cb_holds_slot = false;
1450 * @cb: callback to queue
1455 bool nfsd4_run_cb(struct nfsd4_callback *cb)
1457 struct nfs4_client *clp = cb->cb_clp;
1461 queued = nfsd4_queue_cb(cb);
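
Taken together, nfsd4_init_cb() and nfsd4_run_cb() are the two entry points a caller uses: bind the embedded nfsd4_callback to a client, an ops table, and a CB procedure index, then queue it to the callback workqueue. A hypothetical caller sketch, assuming the trailing nfsd4_init_cb() parameters are the ops table and procedure index (as the body at 1437-1440 suggests); the ops-table type name, embedding structure, stub hooks, and procedure constant are illustrative only:

struct example_cb {
	struct nfsd4_callback	cb;
	/* operation-specific state would live here */
};

/* The three hooks mirror the cb_ops->prepare/->done/->release calls
 * in the matches; prepare is optional (cb_ops->prepare is NULL-checked). */
static void example_cb_prepare(struct nfsd4_callback *cb)
{
}

static int example_cb_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/* return value is switched on by nfsd4_cb_done(); the exact
	 * semantics are not visible in the matches */
	return 1;
}

static void example_cb_release(struct nfsd4_callback *cb)
{
	kfree(container_of(cb, struct example_cb, cb));
}

static const struct nfsd4_callback_ops example_cb_ops = {
	.prepare	= example_cb_prepare,
	.done		= example_cb_done,
	.release	= example_cb_release,
};

static void example_send_cb(struct example_cb *ecb, struct nfs4_client *clp)
{
	nfsd4_init_cb(&ecb->cb, clp, &example_cb_ops, NFSPROC4_CLNT_CB_RECALL);
	nfsd4_run_cb(&ecb->cb);	/* hands cb_work to callback_wq */
}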