Lines matching refs: xprt

All line numbers below are from the Linux kernel's net/sunrpc/svc_xprt.c. The listing shows only lines that reference the identifier xprt, so surrounding context (and continuation lines that do not themselves mention xprt) is omitted.
19 #include <linux/sunrpc/xprt.h>
30 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
34 static void svc_delete_xprt(struct svc_xprt *xprt);
56 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
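
The XPT_BUSY bit is the central serialization point in this file: the enqueue path takes it with the test_and_set_bit() at line 469 below, and svc_xprt_received() reopens it with the clear_bit() at line 282, so at most one thread is ever queueing or processing a given transport. A minimal userspace sketch of the same gate, using C11 atomics; the names (fake_xprt, FAKE_BUSY, try_enqueue, received) are illustrative, not from the kernel source:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_BUSY (1u << 0)

struct fake_xprt {
    atomic_uint flags;
};

static bool try_enqueue(struct fake_xprt *x)
{
    /* Like test_and_set_bit(XPT_BUSY, ...): fetch_or returns the old
     * flag word, so only one caller ever sees the bit clear. */
    if (atomic_fetch_or(&x->flags, FAKE_BUSY) & FAKE_BUSY)
        return false;               /* someone else is processing it */
    /* winner: would add to the pool queue and wake a server thread */
    return true;
}

static void received(struct fake_xprt *x)
{
    /* Like clear_bit(XPT_BUSY, ...) in svc_xprt_received(): reopens
     * the gate so the transport can be enqueued again. */
    atomic_fetch_and(&x->flags, ~FAKE_BUSY);
}

int main(void)
{
    struct fake_xprt x = { 0 };
    printf("first:  %d\n", try_enqueue(&x));    /* 1: won the bit */
    printf("second: %d\n", try_enqueue(&x));    /* 0: still busy */
    received(&x);
    printf("third:  %d\n", try_enqueue(&x));    /* 1: gate reopened */
    return 0;
}
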
154 * @xprt: transport instance
159 void svc_xprt_deferred_close(struct svc_xprt *xprt)
161 if (!test_and_set_bit(XPT_CLOSE, &xprt->xpt_flags))
162 svc_xprt_enqueue(xprt);
168 struct svc_xprt *xprt =
170 struct module *owner = xprt->xpt_class->xcl_owner;
171 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
172 svcauth_unix_info_release(xprt);
173 put_cred(xprt->xpt_cred);
174 put_net_track(xprt->xpt_net, &xprt->ns_tracker);
176 if (xprt->xpt_bc_xprt)
177 xprt_put(xprt->xpt_bc_xprt);
178 if (xprt->xpt_bc_xps)
179 xprt_switch_put(xprt->xpt_bc_xps);
180 trace_svc_xprt_free(xprt);
181 xprt->xpt_ops->xpo_free(xprt);
185 void svc_xprt_put(struct svc_xprt *xprt)
187 kref_put(&xprt->xpt_ref, svc_xprt_free);
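
svc_xprt_put() is a thin kref_put() wrapper: whoever drops the last reference runs svc_xprt_free() (lines 168-181 above), which releases the auth-cache entry, the credentials, the network-namespace reference and any back-channel before handing the memory to the transport's own xpo_free(). A userspace sketch of the same release-on-last-put shape; fake_xprt_put and fake_xprt_free are illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_xprt {
    atomic_int refcount;
};

static void fake_xprt_free(struct fake_xprt *x)
{
    /* In the kernel this is where the auth cache, creds, netns
     * reference and back-channel are dropped before xpo_free(). */
    printf("freeing transport\n");
    free(x);
}

static void fake_xprt_put(struct fake_xprt *x)
{
    /* kref_put() semantics: only the caller that drops the count to
     * zero runs the release function. */
    if (atomic_fetch_sub(&x->refcount, 1) == 1)
        fake_xprt_free(x);
}

int main(void)
{
    struct fake_xprt *x = malloc(sizeof(*x));
    if (!x)
        return 1;
    atomic_init(&x->refcount, 2);   /* two holders */
    fake_xprt_put(x);               /* just a decrement */
    fake_xprt_put(x);               /* last put frees */
    return 0;
}
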
196 struct svc_xprt *xprt, struct svc_serv *serv)
198 memset(xprt, 0, sizeof(*xprt));
199 xprt->xpt_class = xcl;
200 xprt->xpt_ops = xcl->xcl_ops;
201 kref_init(&xprt->xpt_ref);
202 xprt->xpt_server = serv;
203 INIT_LIST_HEAD(&xprt->xpt_list);
204 INIT_LIST_HEAD(&xprt->xpt_ready);
205 INIT_LIST_HEAD(&xprt->xpt_deferred);
206 INIT_LIST_HEAD(&xprt->xpt_users);
207 mutex_init(&xprt->xpt_mutex);
208 spin_lock_init(&xprt->xpt_lock);
209 set_bit(XPT_BUSY, &xprt->xpt_flags);
210 xprt->xpt_net = get_net_track(net, &xprt->ns_tracker, GFP_ATOMIC);
211 strcpy(xprt->xpt_remotebuf, "uninitialized");
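
Note the set_bit(XPT_BUSY, ...) at line 209: a freshly initialized transport is born busy, so nothing can enqueue it while its creator finishes setup; it becomes schedulable only when svc_xprt_received() clears the bit (as the accept path does at line 815 below). A small constructor sketch of the same idiom, complementing the gate sketch above; obj_new and obj_publish are illustrative names:

#include <stdatomic.h>
#include <stdlib.h>

#define BUSY (1u << 0)

struct obj {
    atomic_uint flags;
};

/* Constructor in the svc_xprt_init() style: the object is returned
 * with BUSY already set, so no other thread may touch it until the
 * creator publishes it (cf. svc_xprt_received()). */
static struct obj *obj_new(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    if (o)
        atomic_fetch_or(&o->flags, BUSY);
    return o;
}

static void obj_publish(struct obj *o)
{
    atomic_fetch_and(&o->flags, ~BUSY);     /* now enqueueable */
}

int main(void)
{
    struct obj *o = obj_new();
    if (!o)
        return 1;
    /* ... finish setup here; nothing can enqueue the object ... */
    obj_publish(o);
    free(o);
    return 0;
}
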
234 struct svc_xprt *xprt;
253 xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
254 if (IS_ERR(xprt))
256 xcl->xcl_name, sap, len, xprt);
257 return xprt;
262 * @xprt: controlling transport
270 void svc_xprt_received(struct svc_xprt *xprt)
272 if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
273 WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
277 /* As soon as we clear busy, the xprt could be closed and
280 svc_xprt_get(xprt);
282 clear_bit(XPT_BUSY, &xprt->xpt_flags);
283 svc_xprt_enqueue(xprt);
284 svc_xprt_put(xprt);
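
The comment at line 277 names the subtlety: the instant XPT_BUSY is cleared, another thread may close the transport and drop the final reference, so svc_xprt_received() pins the object with svc_xprt_get() before the clear and releases it only after the enqueue. A self-contained userspace sketch of that ordering; get, put and enqueue are illustrative stand-ins for svc_xprt_get()/svc_xprt_put()/svc_xprt_enqueue():

#include <stdatomic.h>
#include <stdlib.h>

#define BUSY (1u << 0)

struct obj {
    atomic_uint flags;
    atomic_int  ref;
};

static void get(struct obj *o)
{
    atomic_fetch_add(&o->ref, 1);
}

static void put(struct obj *o)
{
    if (atomic_fetch_sub(&o->ref, 1) == 1)
        free(o);
}

static void enqueue(struct obj *o)
{
    (void)o;    /* would hand the object to a worker pool */
}

static void received(struct obj *o)
{
    get(o);     /* pin the object across the window below */
    atomic_fetch_and(&o->flags, ~BUSY);
    /* From here another thread may close o and drop its reference;
     * the one taken above keeps the memory valid for enqueue(). */
    enqueue(o);
    put(o);
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    if (!o)
        return 1;
    atomic_init(&o->ref, 1);
    atomic_fetch_or(&o->flags, BUSY);
    received(o);
    put(o);     /* drop the original reference: frees */
    return 0;
}
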
364 * Copy the local and remote xprt addresses to the rqstp structure
366 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
368 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
369 rqstp->rq_addrlen = xprt->xpt_remotelen;
375 memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
376 rqstp->rq_daddrlen = xprt->xpt_locallen;
393 static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
396 int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);
401 static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
404 if (!svc_xprt_slots_in_range(xprt))
406 atomic_inc(&xprt->xpt_nr_rqsts);
414 struct svc_xprt *xprt = rqstp->rq_xprt;
416 atomic_dec(&xprt->xpt_nr_rqsts);
418 svc_xprt_enqueue(xprt);
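
Each transport may hold only a bounded number of rqst slots: svc_xprt_reserve_slot() checks the range and then increments xpt_nr_rqsts (a deliberately racy check-then-increment, acceptable because the limit is a soft cap), and the release path decrements and re-enqueues the transport, since freed capacity can make it schedulable again. A userspace sketch; LIMIT and the function names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LIMIT 3

static atomic_int nr_slots;

static bool reserve_slot(void)
{
    /* Check-then-increment, as in svc_xprt_reserve_slot(): a racing
     * thread may push the count slightly past LIMIT, which is fine
     * because this is a soft cap, not an invariant. */
    if (atomic_load(&nr_slots) >= LIMIT)
        return false;
    atomic_fetch_add(&nr_slots, 1);
    return true;
}

static void release_slot(void)
{
    atomic_fetch_sub(&nr_slots, 1);
    /* The kernel re-enqueues the transport here: a freed slot may
     * make a previously unschedulable transport ready again. */
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        printf("reserve %d: %s\n", i, reserve_slot() ? "ok" : "over cap");
    release_slot();
    printf("after release: %s\n", reserve_slot() ? "ok" : "over cap");
    return 0;
}
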
422 static bool svc_xprt_ready(struct svc_xprt *xprt)
435 xpt_flags = READ_ONCE(xprt->xpt_flags);
437 trace_svc_xprt_enqueue(xprt, xpt_flags);
443 if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
444 svc_xprt_slots_in_range(xprt))
446 trace_svc_xprt_no_write_space(xprt);
454 * @xprt: transport with data pending
457 void svc_xprt_enqueue(struct svc_xprt *xprt)
461 if (!svc_xprt_ready(xprt))
469 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
472 pool = svc_pool_for_cpu(xprt->xpt_server);
476 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
488 struct svc_xprt *xprt = NULL;
495 xprt = list_first_entry(&pool->sp_sockets,
497 list_del_init(&xprt->xpt_ready);
498 svc_xprt_get(xprt);
502 return xprt;
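
The dequeue helper at lines 488-502 unlinks the first ready transport from the pool's sp_sockets list and takes a reference before the pool lock (not itself visible in these matches) is dropped, so the caller owns the object no matter what other threads do next. A userspace sketch of the unlink-then-get pattern, assuming a pthread mutex in place of the pool spinlock; node and dequeue are illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct node {
    struct node *next;
    atomic_int   ref;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool_head;

static struct node *dequeue(void)
{
    struct node *n;

    pthread_mutex_lock(&pool_lock);
    n = pool_head;
    if (n) {
        pool_head = n->next;            /* cf. list_del_init() */
        n->next = NULL;
        atomic_fetch_add(&n->ref, 1);   /* cf. svc_xprt_get() */
    }
    pthread_mutex_unlock(&pool_lock);
    return n;   /* caller owns a reference and must drop it later */
}

int main(void)
{
    struct node a = { .next = NULL };

    atomic_init(&a.ref, 1);
    pool_head = &a;
    return dequeue() == &a ? 0 : 1;
}
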
517 struct svc_xprt *xprt = rqstp->rq_xprt;
521 if (xprt && space < rqstp->rq_reserved) {
522 atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
525 svc_xprt_enqueue(xprt);
530 static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
535 xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
541 struct svc_xprt *xprt = rqstp->rq_xprt;
543 xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
546 free_deferred(xprt, rqstp->rq_deferred);
567 svc_xprt_put(xprt);
574 * Some svc_serv's will have occasional work to do, even when a xprt is not
627 struct svc_xprt *xprt = NULL;
639 xprt = list_entry(serv->sv_tempsocks.prev,
642 set_bit(XPT_CLOSE, &xprt->xpt_flags);
643 svc_xprt_get(xprt);
647 if (xprt) {
648 svc_xprt_enqueue(xprt);
649 svc_xprt_put(xprt);
787 static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
792 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
793 if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
794 xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
795 svc_delete_xprt(xprt);
796 /* Leave XPT_BUSY set on the dead xprt: */
799 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
805 __module_get(xprt->xpt_class->xcl_owner);
806 svc_check_conn_limits(xprt->xpt_server);
807 newxpt = xprt->xpt_ops->xpo_accept(xprt);
809 newxpt->xpt_cred = get_cred(xprt->xpt_cred);
813 module_put(xprt->xpt_class->xcl_owner);
815 svc_xprt_received(xprt);
816 } else if (test_bit(XPT_HANDSHAKE, &xprt->xpt_flags)) {
817 xprt->xpt_ops->xpo_handshake(xprt);
818 svc_xprt_received(xprt);
819 } else if (svc_xprt_reserve_slot(rqstp, xprt)) {
821 rqstp->rq_deferred = svc_deferred_dequeue(xprt);
825 len = xprt->xpt_ops->xpo_recvfrom(rqstp);
827 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
829 svc_xprt_received(xprt);
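
svc_handle_xprt() is a priority dispatch: a pending close always wins, and the dead transport keeps XPT_BUSY (comment at line 796); a listener accepts a child transport rather than reading; a pending handshake upcall runs before any payload; only then is a slot reserved and a request received, with the no-slot case simply handing the transport back at line 829. The precedence, as a plain userspace sketch with illustrative flag names:

#include <stdio.h>

enum {
    F_CLOSE     = 1,    /* cf. XPT_CLOSE */
    F_LISTENER  = 2,    /* cf. XPT_LISTENER */
    F_HANDSHAKE = 4,    /* cf. XPT_HANDSHAKE */
};

static void handle(unsigned flags)
{
    if (flags & F_CLOSE)
        printf("delete transport (it stays busy forever)\n");
    else if (flags & F_LISTENER)
        printf("accept a new child transport\n");
    else if (flags & F_HANDSHAKE)
        printf("run the handshake upcall\n");
    else
        printf("reserve a slot and receive one request\n");
}

int main(void)
{
    handle(F_LISTENER);
    handle(F_CLOSE | F_LISTENER);   /* close outranks everything */
    handle(0);
    return 0;
}
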
845 struct svc_xprt *xprt = NULL;
857 xprt = svc_get_next_xprt(rqstp);
858 if (!xprt)
861 len = svc_handle_xprt(rqstp, xprt);
869 clear_bit(XPT_OLD, &xprt->xpt_flags);
903 struct svc_xprt *xprt;
907 xprt = rqstp->rq_xprt;
908 if (!xprt)
919 status = xprt->xpt_ops->xpo_sendto(rqstp);
932 struct svc_xprt *xprt;
945 xprt = list_entry(le, struct svc_xprt, xpt_list);
949 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
951 if (kref_read(&xprt->xpt_ref) > 1 ||
952 test_bit(XPT_BUSY, &xprt->xpt_flags))
955 set_bit(XPT_CLOSE, &xprt->xpt_flags);
956 dprintk("queuing xprt %p for closing\n", xprt);
959 svc_xprt_enqueue(xprt);
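
This is classic second-chance aging: each timer pass marks every temporary transport with XPT_OLD, request activity clears the mark (line 869 above), and a transport found still marked on the next pass has been idle for a whole interval, so it is queued for closing unless it is still referenced or busy. A userspace sketch of the mark/clear/expire cycle; conn, sweep and activity are illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct conn {
    bool old;   /* cf. XPT_OLD */
};

static void activity(struct conn *c)
{
    c->old = false;     /* cf. clear_bit(XPT_OLD) in the recv path */
}

static bool sweep(struct conn *c)
{
    if (!c->old) {
        c->old = true;  /* first strike: mark it and move on */
        return false;
    }
    return true;        /* idle for a whole interval: expire it */
}

int main(void)
{
    struct conn busy = { 0 }, idle = { 0 };

    sweep(&busy);       /* both get marked... */
    sweep(&idle);
    activity(&busy);    /* ...but one shows activity in between */
    printf("busy expired: %d\n", sweep(&busy)); /* 0 */
    printf("idle expired: %d\n", sweep(&idle)); /* 1 */
    return 0;
}
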
974 struct svc_xprt *xprt;
980 xprt = list_entry(le, struct svc_xprt, xpt_list);
982 &xprt->xpt_local)) {
983 dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
992 xprt = list_entry(le, struct svc_xprt, xpt_list);
993 set_bit(XPT_CLOSE, &xprt->xpt_flags);
994 set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
995 dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
996 xprt);
997 svc_xprt_enqueue(xprt);
1002 static void call_xpt_users(struct svc_xprt *xprt)
1006 spin_lock(&xprt->xpt_lock);
1007 while (!list_empty(&xprt->xpt_users)) {
1008 u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
1012 spin_unlock(&xprt->xpt_lock);
1018 static void svc_delete_xprt(struct svc_xprt *xprt)
1020 struct svc_serv *serv = xprt->xpt_server;
1023 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
1026 trace_svc_xprt_detach(xprt);
1027 xprt->xpt_ops->xpo_detach(xprt);
1028 if (xprt->xpt_bc_xprt)
1029 xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);
1032 list_del_init(&xprt->xpt_list);
1033 WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
1034 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
1038 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
1039 free_deferred(xprt, dr);
1041 call_xpt_users(xprt);
1042 svc_xprt_put(xprt);
1047 * @xprt: transport to disconnect
1050 void svc_xprt_close(struct svc_xprt *xprt)
1052 trace_svc_xprt_close(xprt);
1053 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1054 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
1063 svc_delete_xprt(xprt);
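
The close protocol sets XPT_CLOSE before contending for XPT_BUSY: if another thread already holds BUSY, that owner will notice CLOSE when it finishes and effect the delete itself; if the bit is won here, this caller owns the transport and calls svc_delete_xprt() directly. A userspace sketch of the hand-off, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

#define BUSY  (1u << 0)
#define CLOSE (1u << 1)

static atomic_uint flags;

static void request_close(void)
{
    atomic_fetch_or(&flags, CLOSE);     /* mark first... */
    if (atomic_fetch_or(&flags, BUSY) & BUSY) {
        /* ...then contend for ownership. Losing means the current
         * owner will see CLOSE when it finishes and do the delete. */
        printf("close deferred to the current owner\n");
        return;
    }
    printf("we own the transport: delete now\n");
}

static void owner_done(void)
{
    /* What the current owner checks when it finishes its work. */
    if (atomic_load(&flags) & CLOSE)
        printf("owner sees CLOSE: delete now\n");
}

int main(void)
{
    atomic_fetch_or(&flags, BUSY);  /* simulate an active owner */
    request_close();
    owner_done();
    return 0;
}

Either way exactly one party reaches the teardown, which is why svc_delete_xprt() can use the XPT_DEAD test_and_set at line 1023 above as a simple reentry guard.
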
1069 struct svc_xprt *xprt;
1073 list_for_each_entry(xprt, xprt_list, xpt_list) {
1074 if (xprt->xpt_net != net)
1077 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1078 svc_xprt_enqueue(xprt);
1087 struct svc_xprt *xprt;
1095 list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
1096 if (xprt->xpt_net != net)
1098 list_del_init(&xprt->xpt_ready);
1100 return xprt;
1109 struct svc_xprt *xprt;
1111 while ((xprt = svc_dequeue_net(serv, net))) {
1112 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1113 svc_delete_xprt(xprt);
1154 struct svc_xprt *xprt = dr->xprt;
1156 spin_lock(&xprt->xpt_lock);
1157 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
1158 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
1159 spin_unlock(&xprt->xpt_lock);
1161 free_deferred(xprt, dr);
1162 svc_xprt_put(xprt);
1165 dr->xprt = NULL;
1166 list_add(&dr->handle.recent, &xprt->xpt_deferred);
1167 spin_unlock(&xprt->xpt_lock);
1169 svc_xprt_enqueue(xprt);
1170 svc_xprt_put(xprt);
1177 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
1179 * This code can only handle requests that consist of an xprt-header
1217 dr->xprt = rqstp->rq_xprt;
1254 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1258 if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
1260 spin_lock(&xprt->xpt_lock);
1261 if (!list_empty(&xprt->xpt_deferred)) {
1262 dr = list_entry(xprt->xpt_deferred.next,
1267 clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
1268 spin_unlock(&xprt->xpt_lock);
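
XPT_DEFERRED works as a flag-as-hint: the lockless test_bit() at line 1258 avoids taking xpt_lock in the common empty case, the authoritative state is the xpt_deferred list under the lock, and the flag is cleared only under the lock once the list is seen to be truly empty (line 1267). The requeue side in svc_revisit() sets the flag under the same lock, or frees the request outright if the transport is already dead (lines 1158-1162). A userspace sketch of the hint pattern, assuming a pthread mutex; dq, enqueue and dequeue are illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct dq {
    atomic_bool     hint;   /* cf. XPT_DEFERRED */
    pthread_mutex_t lock;   /* cf. xpt_lock */
    int             queued; /* stand-in for the xpt_deferred list */
};

static void enqueue(struct dq *q)
{
    pthread_mutex_lock(&q->lock);
    q->queued++;
    atomic_store(&q->hint, true);   /* cf. set_bit(XPT_DEFERRED) */
    pthread_mutex_unlock(&q->lock);
}

static int dequeue(struct dq *q)
{
    int got = 0;

    if (!atomic_load(&q->hint))     /* cheap lockless fast path */
        return 0;
    pthread_mutex_lock(&q->lock);
    if (q->queued > 0) {
        q->queued--;
        got = 1;
    } else {
        /* Truly empty, observed under the lock: only now is it safe
         * to drop the hint without losing a concurrent enqueue. */
        atomic_store(&q->hint, false);
    }
    pthread_mutex_unlock(&q->lock);
    return got;
}

int main(void)
{
    struct dq q = { .lock = PTHREAD_MUTEX_INITIALIZER };

    enqueue(&q);
    return (dequeue(&q) == 1 && dequeue(&q) == 0) ? 0 : 1;
}
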
1292 struct svc_xprt *xprt;
1300 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1301 if (xprt->xpt_net != net)
1303 if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1305 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
1307 if (port != 0 && port != svc_xprt_local_port(xprt))
1309 found = xprt;
1310 svc_xprt_get(xprt);
1318 static int svc_one_xprt_name(const struct svc_xprt *xprt,
1324 xprt->xpt_class->xcl_name,
1325 svc_xprt_local_port(xprt));
1345 struct svc_xprt *xprt;
1357 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1358 len = svc_one_xprt_name(xprt, pos, buflen - totlen);