Lines matching refs: rqstp

565 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
570 if (svc_is_backchannel(rqstp))
584 rqstp->rq_pages[arghi++] = p;
594 svc_release_buffer(struct svc_rqst *rqstp)
598 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
599 if (rqstp->rq_pages[i])
600 put_page(rqstp->rq_pages[i]);
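
Taken together, the svc_init_buffer()/svc_release_buffer() hits describe the per-request page array: pages are pre-allocated on the requested NUMA node up front and dropped one by one at teardown. A minimal sketch of that pairing, reconstructed only from the matched lines; the page-count arithmetic and the boolean return convention are assumptions, and backchannel requests are skipped because they reuse the fore-channel buffers:

	/* Sketch: pre-allocate the pages that will back one max-sized
	 * request and reply on the given NUMA node. */
	static int example_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
	{
		unsigned int pages, arghi = 0;

		if (svc_is_backchannel(rqstp))
			return 1;	/* backchannel reuses the client's buffers */

		pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;	/* assumption: request + reply */
		if (pages > ARRAY_SIZE(rqstp->rq_pages))
			return 0;

		while (pages--) {
			struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);

			if (!p)
				return 0;	/* caller unwinds via svc_rqst_free() */
			rqstp->rq_pages[arghi++] = p;
		}
		return 1;
	}

	/* Sketch: drop whatever example_init_buffer() managed to allocate. */
	static void example_release_buffer(struct svc_rqst *rqstp)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
			if (rqstp->rq_pages[i])
				put_page(rqstp->rq_pages[i]);
	}
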
606 struct svc_rqst *rqstp;
608 rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
609 if (!rqstp)
610 return rqstp;
612 __set_bit(RQ_BUSY, &rqstp->rq_flags);
613 spin_lock_init(&rqstp->rq_lock);
614 rqstp->rq_server = serv;
615 rqstp->rq_pool = pool;
617 rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
618 if (!rqstp->rq_argp)
621 rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
622 if (!rqstp->rq_resp)
625 if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
628 return rqstp;
630 svc_rqst_free(rqstp);
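
The svc_rqst_alloc() matches show the allocation order: the rqstp itself (zeroed, node-local), the XDR argument and result scratch buffers sized by serv->sv_xdrsize, and finally the page buffer. A sketch of that flow, reconstructed from the lines above; the error-label name and the explicit NULL return are assumptions (line 610 simply returns the failed pointer):

	static struct svc_rqst *example_rqst_alloc(struct svc_serv *serv,
						   struct svc_pool *pool, int node)
	{
		struct svc_rqst *rqstp;

		rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
		if (!rqstp)
			return NULL;

		__set_bit(RQ_BUSY, &rqstp->rq_flags);	/* not yet available to the pool */
		spin_lock_init(&rqstp->rq_lock);
		rqstp->rq_server = serv;
		rqstp->rq_pool = pool;

		rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
		if (!rqstp->rq_argp)
			goto out_enomem;

		rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
		if (!rqstp->rq_resp)
			goto out_enomem;

		if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
			goto out_enomem;

		return rqstp;
	out_enomem:
		svc_rqst_free(rqstp);	/* kfree() tolerates the still-NULL members */
		return NULL;
	}
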
638 struct svc_rqst *rqstp;
640 rqstp = svc_rqst_alloc(serv, pool, node);
641 if (!rqstp)
647 list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
649 return rqstp;
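
svc_prepare_thread() wires a freshly allocated rqstp into the pool by putting it on the RCU-protected sp_all_threads list. In the sketch below, only svc_rqst_alloc() and list_add_rcu() come from the matched lines; the spin lock and the ERR_PTR convention (implied by the IS_ERR() check at line 722) are assumptions:

	static struct svc_rqst *example_prepare_thread(struct svc_serv *serv,
						       struct svc_pool *pool, int node)
	{
		struct svc_rqst *rqstp = svc_rqst_alloc(serv, pool, node);

		if (!rqstp)
			return ERR_PTR(-ENOMEM);

		spin_lock_bh(&pool->sp_lock);		/* assumed lock */
		list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
		spin_unlock_bh(&pool->sp_lock);

		return rqstp;
	}
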
690 struct svc_rqst *rqstp;
696 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
697 set_bit(RQ_VICTIM, &rqstp->rq_flags);
698 list_del_rcu(&rqstp->rq_all);
699 task = rqstp->rq_task;
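
The lines at 690-699 are the victim-selection half of thread shrinking: the first thread on the pool list is flagged RQ_VICTIM, unlinked under RCU, and its task_struct handed back so the caller can stop it. Sketched below; the caller's locking and the empty-list case do not appear in the matches and are omitted here too:

	/* Sketch: pick one pool thread to retire.  Setting RQ_VICTIM before
	 * the list_del_rcu() tells svc_exit_thread() that the unlinking has
	 * already been done on this rqstp's behalf. */
	static struct task_struct *example_choose_victim(struct svc_pool *pool)
	{
		struct svc_rqst *rqstp;

		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);

		return rqstp->rq_task;	/* caller signals or stops this task */
	}
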
710 struct svc_rqst *rqstp;
721 rqstp = svc_prepare_thread(serv, chosen_pool, node);
722 if (IS_ERR(rqstp))
723 return PTR_ERR(rqstp);
726 task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
730 svc_exit_thread(rqstp);
734 rqstp->rq_task = task;
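
Lines 710-734 are the thread-creation side: prepare an rqstp, hand it to kthread_create_on_node() as the thread's data pointer (serv->sv_ops->svo_function is the thread body, e.g. nfsd), and remember the task in rq_task so it can be found again at shutdown. A sketch under the assumption that the thread name is simply the service name and that CPU binding is skipped:

	static int example_start_kthread(struct svc_serv *serv,
					 struct svc_pool *chosen_pool, int node)
	{
		struct svc_rqst *rqstp;
		struct task_struct *task;

		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			svc_exit_thread(rqstp);		/* undo svc_prepare_thread() */
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		wake_up_process(task);
		return 0;
	}
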
773 * rqstp->rq_task, which only the nfs ones do. Assumes the serv
803 struct svc_rqst *rqstp;
812 rqstp = kthread_data(task);
815 svc_exit_thread(rqstp);
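
The comment at line 773 and the kthread_data() call at 812 explain how teardown finds its way back from a task_struct to the rqstp: destroying threads only works for services that filled in rqstp->rq_task, and kthread_data() returns the pointer originally passed to kthread_create_on_node(). A sketch of the synchronous stop path; the kthread_stop()/-EINTR race handling is an assumption, not visible in the matched lines:

	/* Sketch: stop one service thread and make sure svc_exit_thread()
	 * runs exactly once, whether or not the thread body got to it. */
	static void example_stop_one_thread(struct task_struct *task)
	{
		struct svc_rqst *rqstp = kthread_data(task);

		if (kthread_stop(task) == -EINTR)	/* thread never ran its body */
			svc_exit_thread(rqstp);
	}
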
846 svc_rqst_free(struct svc_rqst *rqstp)
848 svc_release_buffer(rqstp);
849 kfree(rqstp->rq_resp);
850 kfree(rqstp->rq_argp);
851 kfree(rqstp->rq_auth_data);
852 kfree_rcu(rqstp, rq_rcu_head);
857 svc_exit_thread(struct svc_rqst *rqstp)
859 struct svc_serv *serv = rqstp->rq_server;
860 struct svc_pool *pool = rqstp->rq_pool;
864 if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
865 list_del_rcu(&rqstp->rq_all);
868 svc_rqst_free(rqstp);
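
svc_rqst_free() and svc_exit_thread() are the mirror image of the allocation path: release the page buffer and the XDR scratch buffers, then free the rqstp itself only after an RCU grace period, since lockless walkers of pool->sp_all_threads may still hold a reference. Restated below; the server reference drop hinted at by line 859 has no 'rqstp' in it and is only noted in a comment:

	static void example_rqst_free(struct svc_rqst *rqstp)
	{
		svc_release_buffer(rqstp);		/* per-request pages */
		kfree(rqstp->rq_resp);
		kfree(rqstp->rq_argp);
		kfree(rqstp->rq_auth_data);
		kfree_rcu(rqstp, rq_rcu_head);		/* defer past RCU readers */
	}

	static void example_exit_thread(struct svc_rqst *rqstp)
	{
		/* If choose_victim() already set RQ_VICTIM it also unlinked us;
		 * otherwise take this rqstp off the pool's thread list now. */
		if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
			list_del_rcu(&rqstp->rq_all);

		svc_rqst_free(rqstp);

		/* The real function also releases the svc_serv saved at line
		 * 859; that code does not appear in the listing. */
	}
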
1145 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1156 dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1161 static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
1165 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
1167 set_bit(RQ_AUTHERR, &rqstp->rq_flags);
1173 svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
1175 if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
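
RQ_AUTHERR is a per-request side channel: code that detects an authentication problem after the normal auth step records it with svc_return_autherr(), and the dispatcher later converts the flag back into an RPC auth_stat via svc_get_autherr() (see lines 1203 and 1393). A hedged illustration; everything prefixed example_ is invented for the sketch:

	/* Hypothetical check that fails the call with an auth error. */
	static __be32 example_check_cred(struct svc_rqst *rqstp, bool cred_ok)
	{
		if (!cred_ok)
			return svc_return_autherr(rqstp, rpc_autherr_badcred);
		return rpc_success;
	}

	/* Mirrors the dispatcher's consumption of the flag. */
	static void example_collect_autherr(struct svc_rqst *rqstp,
					    __be32 *statp, __be32 *auth_statp)
	{
		if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
			*auth_statp = svc_get_autherr(rqstp, statp);
	}
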
1181 svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
1183 struct kvec *argv = &rqstp->rq_arg.head[0];
1184 struct kvec *resv = &rqstp->rq_res.head[0];
1185 const struct svc_procedure *procp = rqstp->rq_procinfo;
1192 !procp->pc_decode(rqstp, argv->iov_base)) {
1197 *statp = procp->pc_func(rqstp);
1200 test_bit(RQ_DROPME, &rqstp->rq_flags))
1203 if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
1211 !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
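
The svc_generic_dispatch() matches show the decode -> pc_func -> encode flow. A simplified restatement follows; the return convention is an approximation (0 here means "svc_process_common() should drop rather than send"):

	static int example_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
	{
		struct kvec *argv = &rqstp->rq_arg.head[0];
		struct kvec *resv = &rqstp->rq_res.head[0];
		const struct svc_procedure *procp = rqstp->rq_procinfo;

		if (procp->pc_decode &&
		    !procp->pc_decode(rqstp, argv->iov_base)) {
			*statp = rpc_garbage_args;	/* XDR decode failed */
			return 1;
		}

		*statp = procp->pc_func(rqstp);		/* run the procedure */

		if (*statp == rpc_drop_reply ||
		    test_bit(RQ_DROPME, &rqstp->rq_flags))
			return 0;			/* caller drops the request */

		if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
			return 1;			/* caller turns this into an auth error */

		if (*statp == rpc_success && procp->pc_encode &&
		    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len))
			*statp = rpc_system_err;	/* XDR encode failed */
		return 1;
	}
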
1220 svc_generic_init_request(struct svc_rqst *rqstp,
1227 if (rqstp->rq_vers >= progp->pg_nvers )
1229 versp = progp->pg_vers[rqstp->rq_vers];
1244 if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
1245 !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
1248 if (rqstp->rq_proc >= versp->vs_nproc)
1250 rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
1255 memset(rqstp->rq_argp, 0, procp->pc_argsize);
1256 memset(rqstp->rq_resp, 0, procp->pc_ressize);
1259 versp->vs_count[rqstp->rq_proc]++;
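
svc_generic_init_request() is the version/procedure lookup: reject unknown versions and procedures, refuse versions that need a congestion-controlled transport when the xprt lacks XPT_CONG_CTRL (line 1245), then zero the per-call argument/result structs and bump the per-procedure counter. Sketched below; the return codes and the omitted struct svc_process_info plumbing are simplifications:

	static __be32 example_init_request(struct svc_rqst *rqstp,
					   const struct svc_program *progp)
	{
		const struct svc_version *versp;
		const struct svc_procedure *procp;

		if (rqstp->rq_vers >= progp->pg_nvers)
			return rpc_prog_mismatch;
		versp = progp->pg_vers[rqstp->rq_vers];
		if (!versp)
			return rpc_prog_mismatch;

		if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
		    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
			return rpc_prog_mismatch;	/* simplification of the real error path */

		if (rqstp->rq_proc >= versp->vs_nproc)
			return rpc_proc_unavail;
		rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];

		/* Zero the per-call argument/result structs before decode. */
		memset(rqstp->rq_argp, 0, procp->pc_argsize);
		memset(rqstp->rq_resp, 0, procp->pc_ressize);

		versp->vs_count[rqstp->rq_proc]++;	/* per-procedure statistics */
		return rpc_success;
	}
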
1276 svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1280 struct svc_serv *serv = rqstp->rq_server;
1294 set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
1296 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
1297 clear_bit(RQ_DROPME, &rqstp->rq_flags);
1299 svc_putu32(resv, rqstp->rq_xid);
1314 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
1315 rqstp->rq_vers = svc_getnl(argv); /* version number */
1316 rqstp->rq_proc = svc_getnl(argv); /* procedure number */
1327 auth_res = svc_authenticate(rqstp, &auth_stat);
1331 auth_res = progp->pg_authenticate(rqstp);
1334 trace_svc_authenticate(rqstp, auth_res, auth_stat);
1356 rpc_stat = progp->pg_init_request(rqstp, progp, &process);
1368 procp = rqstp->rq_procinfo;
1375 trace_svc_process(rqstp, progp->pg_name);
1385 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1389 if (!svc_generic_dispatch(rqstp, statp))
1393 auth_stat = svc_get_autherr(rqstp, statp);
1398 if (!process.dispatch(rqstp, statp))
1408 procp->pc_release(rqstp);
1414 if (svc_authorise(rqstp))
1420 procp->pc_release(rqstp);
1422 svc_authorise(rqstp); /* doesn't hurt to call this twice */
1427 svc_authorise(rqstp);
1429 if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1430 svc_close_xprt(rqstp->rq_xprt);
1435 svc_printk(rqstp, "short len %zd, dropping request\n",
1449 procp->pc_release(rqstp);
1454 xdr_ressize_check(rqstp, reply_statp);
1467 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1468 rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
1477 svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
1484 svc_printk(rqstp, "failed to decode args\n");
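
svc_process_common() is where most of the matches land: it stamps the reply with the XID, pulls program/version/procedure out of the call body (lines 1314-1316), authenticates, resolves the procedure via pg_init_request, dispatches, and falls into a chain of error labels (short length, unknown version, unknown procedure, decode failure) that all end in svc_printk(). The outline below is a reading aid rather than real code; the label names, the auth switch, and the reply framing are simplified assumptions:

	static int example_process_common(struct svc_rqst *rqstp,
					  struct kvec *argv, struct kvec *resv)
	{
		__be32 *statp, auth_stat = rpc_auth_ok;

		/* Reset the per-request flags (cf. lines 1294-1297). */
		set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
		clear_bit(RQ_DROPME, &rqstp->rq_flags);

		svc_putu32(resv, rqstp->rq_xid);	/* reply starts with the XID */

		/* Pull the call body apart. */
		rqstp->rq_prog = svc_getnl(argv);
		rqstp->rq_vers = svc_getnl(argv);
		rqstp->rq_proc = svc_getnl(argv);

		switch (svc_authenticate(rqstp, &auth_stat)) {
		case SVC_OK:
			break;
		case SVC_DROP:
			return 0;			/* drop silently */
		default:
			goto err_bad_auth;
		}

		/* pg_init_request() would resolve rqstp->rq_procinfo here. */

		/* Accept-status placeholder; the dispatcher overwrites it. */
		statp = resv->iov_base + resv->iov_len;
		svc_putnl(resv, RPC_SUCCESS);

		if (!svc_generic_dispatch(rqstp, statp))
			return 0;			/* procedure asked for a drop */
		return 1;				/* reply is ready for svc_send() */

	err_bad_auth:
		svc_putnl(resv, 1);			/* REJECT */
		svc_putnl(resv, 1);			/* AUTH_ERROR */
		svc_putnl(resv, ntohl(auth_stat));
		return 1;
	}
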
1497 svc_process(struct svc_rqst *rqstp)
1499 struct kvec *argv = &rqstp->rq_arg.head[0];
1500 struct kvec *resv = &rqstp->rq_res.head[0];
1501 struct svc_serv *serv = rqstp->rq_server;
1508 rqstp->rq_next_page = &rqstp->rq_respages[1];
1509 resv->iov_base = page_address(rqstp->rq_respages[0]);
1511 rqstp->rq_res.pages = rqstp->rq_respages + 1;
1512 rqstp->rq_res.len = 0;
1513 rqstp->rq_res.page_base = 0;
1514 rqstp->rq_res.page_len = 0;
1515 rqstp->rq_res.buflen = PAGE_SIZE;
1516 rqstp->rq_res.tail[0].iov_base = NULL;
1517 rqstp->rq_res.tail[0].iov_len = 0;
1522 svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1528 if (likely(svc_process_common(rqstp, argv, resv)))
1529 return svc_send(rqstp);
1532 svc_drop(rqstp);
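
svc_process() prepares rq_res before handing the request to svc_process_common(): the reply starts as an empty head kvec in the first response page, with the remaining pages chained behind it for bulk data and no tail. If the message direction is not a call the request is dropped (line 1522); otherwise svc_process_common() decides between svc_send() and svc_drop(). Sketch of just the reset; the resv->iov_len = 0 line has no 'rqstp' in it and is inferred:

	static void example_reset_reply(struct svc_rqst *rqstp)
	{
		struct kvec *resv = &rqstp->rq_res.head[0];

		rqstp->rq_next_page = &rqstp->rq_respages[1];
		resv->iov_base = page_address(rqstp->rq_respages[0]);
		resv->iov_len = 0;			/* inferred, not in the matches */
		rqstp->rq_res.pages = rqstp->rq_respages + 1;
		rqstp->rq_res.len = 0;
		rqstp->rq_res.page_base = 0;
		rqstp->rq_res.page_len = 0;
		rqstp->rq_res.buflen = PAGE_SIZE;
		rqstp->rq_res.tail[0].iov_base = NULL;
		rqstp->rq_res.tail[0].iov_len = 0;
	}
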
1544 struct svc_rqst *rqstp)
1546 struct kvec *argv = &rqstp->rq_arg.head[0];
1547 struct kvec *resv = &rqstp->rq_res.head[0];
1555 rqstp->rq_xid = req->rq_xid;
1556 rqstp->rq_prot = req->rq_xprt->prot;
1557 rqstp->rq_server = serv;
1558 rqstp->rq_bc_net = req->rq_xprt->xprt_net;
1560 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1561 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1562 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1563 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1566 rqstp->rq_arg.len = req->rq_private_buf.len;
1567 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1568 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1569 rqstp->rq_arg.page_len = 0;
1570 } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1571 rqstp->rq_arg.page_len)
1572 rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1573 rqstp->rq_arg.head[0].iov_len;
1575 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1576 rqstp->rq_arg.page_len;
1589 proc_error = svc_process_common(rqstp, argv, resv);
1599 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
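
The backchannel path (bc_svc_process) borrows the client transport's buffers: it copies rq_rcv_buf/rq_snd_buf into rq_arg/rq_res, then fixes up rq_arg's head/page split so the xdr_buf describes exactly the bytes actually received. The fixup at lines 1566-1576 restated on its own:

	static void example_fixup_bc_arg(struct svc_rqst *rqstp, size_t received)
	{
		struct xdr_buf *arg = &rqstp->rq_arg;

		arg->len = received;
		if (arg->len <= arg->head[0].iov_len) {
			/* Everything fits in the head kvec; no page data. */
			arg->head[0].iov_len = arg->len;
			arg->page_len = 0;
		} else if (arg->len <= arg->head[0].iov_len + arg->page_len) {
			/* Trim the page data to the bytes actually received. */
			arg->page_len = arg->len - arg->head[0].iov_len;
		} else {
			/* Claimed length exceeds the buffer; clamp to what exists. */
			arg->len = arg->head[0].iov_len + arg->page_len;
		}
	}
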
1620 u32 svc_max_payload(const struct svc_rqst *rqstp)
1622 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1624 if (rqstp->rq_server->sv_max_payload < max)
1625 max = rqstp->rq_server->sv_max_payload;
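
svc_max_payload() is simply the smaller of the transport class limit and the per-server cap; an equivalent formulation:

	static u32 example_max_payload(const struct svc_rqst *rqstp)
	{
		return min_t(u32, rqstp->rq_xprt->xpt_class->xcl_max_payload,
			     rqstp->rq_server->sv_max_payload);
	}
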
1632 * @rqstp: svc_rqst to operate on
1633 * @offset: payload's byte offset in rqstp->rq_res
1639 int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
1642 return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
1648 * @rqstp: svc_rqst to operate on
1653 * Fills in rqstp::rq_vec, and returns the number of elements.
1655 unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
1658 struct kvec *vec = rqstp->rq_vec;
1680 WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
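
svc_fill_write_vector() flattens a WRITE payload into rqstp->rq_vec: one kvec for whatever arrived in the head buffer, then one per payload page, with the WARN_ON_ONCE at line 1680 guarding the fixed-size array. A sketch built around the matched lines; the 'first' kvec parameter and the page walk are assumptions:

	static unsigned int example_fill_write_vector(struct svc_rqst *rqstp,
						      struct page **pages,
						      struct kvec *first, size_t total)
	{
		struct kvec *vec = rqstp->rq_vec;
		unsigned int i = 0;

		if (first->iov_len) {			/* data that arrived in the head */
			vec[i].iov_base = first->iov_base;
			vec[i].iov_len = min_t(size_t, total, first->iov_len);
			total -= vec[i].iov_len;
			i++;
		}

		while (total) {				/* remaining data lives in pages */
			vec[i].iov_base = page_address(*pages);
			vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
			total -= vec[i].iov_len;
			pages++;
			i++;
		}

		WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
		return i;
	}
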
1687 * @rqstp: svc_rqst to operate on
1696 char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,