Lines Matching refs:xprt

3  *  linux/net/sunrpc/xprt.c
70 static void xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
72 static void xprt_destroy(struct rpc_xprt *xprt);
216 static void xprt_clear_locked(struct rpc_xprt *xprt)
218 xprt->snd_task = NULL;
219 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
221 clear_bit(XPRT_LOCKED, &xprt->state);
224 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
230 * @xprt: pointer to the target transport
236 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
240 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
241 if (task == xprt->snd_task)
245 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
247 xprt->snd_task = task;
250 trace_xprt_reserve_xprt(xprt, task);
254 xprt_clear_locked(xprt);
258 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
261 rpc_sleep_on(&xprt->sending, task, NULL);
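
The xprt_reserve_xprt() lines above claim the transport with a single test-and-set bit (XPRT_LOCKED) and record the owning task in snd_task, so a task that already holds the lock can re-enter while everyone else sleeps on the sending queue. A minimal user-space sketch of that reservation pattern (types and names invented for illustration; this is not the kernel code):

/*
 * Sketch of the XPRT_LOCKED reservation pattern: an atomic
 * test-and-set claims the lock, the winner records itself so a
 * re-entrant caller is recognised as the current owner.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct task_sketch { int id; };

struct xprt_sketch {
	atomic_flag locked;		/* stands in for XPRT_LOCKED in xprt->state */
	struct task_sketch *snd_task;	/* task currently holding the send lock */
};

static bool reserve_xprt_sketch(struct xprt_sketch *x, struct task_sketch *t)
{
	if (atomic_flag_test_and_set(&x->locked)) {
		/* already locked: only the current owner may proceed */
		return x->snd_task == t;
	}
	x->snd_task = t;		/* we won the race; record ownership */
	return true;
}

static void release_xprt_sketch(struct xprt_sketch *x, struct task_sketch *t)
{
	if (x->snd_task != t)
		return;
	x->snd_task = NULL;
	atomic_flag_clear(&x->locked);	/* next waiter may now take the lock */
}
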
267 xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
269 return test_bit(XPRT_CWND_WAIT, &xprt->state);
273 xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
275 if (!list_empty(&xprt->xmit_queue)) {
277 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
281 set_bit(XPRT_CWND_WAIT, &xprt->state);
285 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
287 if (!RPCXPRT_CONGESTED(xprt))
288 clear_bit(XPRT_CWND_WAIT, &xprt->state);
300 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
304 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
305 if (task == xprt->snd_task)
310 xprt->snd_task = task;
313 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
315 if (!xprt_need_congestion_window_wait(xprt)) {
316 xprt->snd_task = task;
320 xprt_clear_locked(xprt);
324 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
327 rpc_sleep_on(&xprt->sending, task, NULL);
330 trace_xprt_reserve_cong(xprt, task);
335 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
339 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
341 spin_lock(&xprt->transport_lock);
342 retval = xprt->ops->reserve_xprt(xprt, task);
343 spin_unlock(&xprt->transport_lock);
349 struct rpc_xprt *xprt = data;
351 xprt->snd_task = task;
355 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
357 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
359 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
361 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
362 __xprt_lock_write_func, xprt))
365 xprt_clear_locked(xprt);
368 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
370 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
372 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
374 if (xprt_need_congestion_window_wait(xprt))
376 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
377 __xprt_lock_write_func, xprt))
380 xprt_clear_locked(xprt);
385 * @xprt: transport with other tasks potentially waiting
390 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
392 if (xprt->snd_task == task) {
393 xprt_clear_locked(xprt);
394 __xprt_lock_write_next(xprt);
396 trace_xprt_release_xprt(xprt, task);
402 * @xprt: transport with other tasks potentially waiting
408 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
410 if (xprt->snd_task == task) {
411 xprt_clear_locked(xprt);
412 __xprt_lock_write_next_cong(xprt);
414 trace_xprt_release_cong(xprt, task);
418 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
420 if (xprt->snd_task != task)
422 spin_lock(&xprt->transport_lock);
423 xprt->ops->release_xprt(xprt, task);
424 spin_unlock(&xprt->transport_lock);
432 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
436 trace_xprt_get_cong(xprt, req->rq_task);
437 if (RPCXPRT_CONGESTED(xprt)) {
438 xprt_set_congestion_window_wait(xprt);
442 xprt->cong += RPC_CWNDSCALE;
451 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
456 xprt->cong -= RPC_CWNDSCALE;
457 xprt_test_and_clear_congestion_window_wait(xprt);
458 trace_xprt_put_cong(xprt, req->rq_task);
459 __xprt_lock_write_next_cong(xprt);
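
__xprt_get_cong() and __xprt_put_cong() above charge and refund one RPC_CWNDSCALE unit of congestion credit per in-flight request, and the transport counts as congested once xprt->cong reaches xprt->cwnd. A self-contained sketch of that accounting (the constants match the upstream definitions; the struct and names are simplified):

/*
 * Congestion credit accounting: every in-flight request holds
 * RPC_CWNDSCALE units of "cong"; once cong >= cwnd new requests
 * must wait for the congestion window.
 */
#define RPC_CWNDSHIFT	(8U)
#define RPC_CWNDSCALE	(1U << RPC_CWNDSHIFT)

struct cong_sketch {
	unsigned long cong;	/* credit consumed by in-flight requests */
	unsigned long cwnd;	/* current congestion window */
};

#define SKETCH_CONGESTED(x)	((x)->cong >= (x)->cwnd)

/* Try to charge one request against the window; 0 on success. */
static int get_cong_sketch(struct cong_sketch *x)
{
	if (SKETCH_CONGESTED(x))
		return -1;		/* caller must wait for the window */
	x->cong += RPC_CWNDSCALE;
	return 0;
}

/* Return the request's credit when its reply arrives or it is dropped. */
static void put_cong_sketch(struct cong_sketch *x)
{
	if (x->cong >= RPC_CWNDSCALE)
		x->cong -= RPC_CWNDSCALE;
}
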
464 * @xprt: pointer to transport
470 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
476 spin_lock(&xprt->transport_lock);
477 ret = __xprt_get_cong(xprt, req) != 0;
478 spin_unlock(&xprt->transport_lock);
497 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
499 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
500 __xprt_lock_write_next_cong(xprt);
505 * entry on xprt->sending
508 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
510 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
511 spin_lock(&xprt->transport_lock);
512 __xprt_lock_write_next_cong(xprt);
513 spin_unlock(&xprt->transport_lock);
519 * @xprt: pointer to xprt
533 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
536 unsigned long cwnd = xprt->cwnd;
538 if (result >= 0 && cwnd <= xprt->cong) {
542 if (cwnd > RPC_MAXCWND(xprt))
543 cwnd = RPC_MAXCWND(xprt);
544 __xprt_lock_write_next_cong(xprt);
551 xprt->cong, xprt->cwnd, cwnd);
552 xprt->cwnd = cwnd;
553 __xprt_put_cong(xprt, req);
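
Only fragments of xprt_adjust_cwnd() are matched above; the function applies an additive-increase/multiplicative-decrease update to the congestion window. A hedged user-space rendering of that rule follows; the timeout branch is not visible in the matched lines and is reconstructed from the upstream helper, and RPC_MAXCWND(xprt) is replaced by a fixed stand-in:

/*
 * AIMD congestion window update, illustrative only: grow slowly on a
 * successful reply while the window is full, halve on timeout, and
 * clamp to [RPC_CWNDSCALE, SKETCH_MAXCWND].
 */
#include <errno.h>

#define RPC_CWNDSCALE	(1UL << 8)
#define SKETCH_MAXCWND	(RPC_CWNDSCALE * 128)	/* stand-in for RPC_MAXCWND(xprt) */

static unsigned long adjust_cwnd_sketch(unsigned long cwnd, unsigned long cong,
					int result)
{
	if (result >= 0 && cwnd <= cong) {
		/* success while the window is full: additive increase,
		 * the (cwnd >> 1) term rounds the division */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > SKETCH_MAXCWND)
			cwnd = SKETCH_MAXCWND;
	} else if (result == -ETIMEDOUT) {
		/* timeout: multiplicative decrease, keep at least one slot */
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	return cwnd;
}
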
559 * @xprt: transport with waiting tasks
563 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
566 rpc_wake_up_status(&xprt->pending, status);
568 rpc_wake_up(&xprt->pending);
574 * @xprt: transport
580 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
582 set_bit(XPRT_WRITE_SPACE, &xprt->state);
587 xprt_clear_write_space_locked(struct rpc_xprt *xprt)
589 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
590 __xprt_lock_write_next(xprt);
592 "xprt %p\n", xprt);
600 * @xprt: transport with waiting tasks
604 bool xprt_write_space(struct rpc_xprt *xprt)
608 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
610 spin_lock(&xprt->transport_lock);
611 ret = xprt_clear_write_space_locked(xprt);
612 spin_unlock(&xprt->transport_lock);
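
xprt_write_space() above checks the XPRT_WRITE_SPACE bit without the lock first, then re-checks and clears it under transport_lock before waking the next writer (xprt_clear_write_space_locked()). The same fast-path/slow-path shape in user-space C, purely illustrative:

/*
 * Lock-free fast path, then clear-and-wake under the lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct wspace_sketch {
	atomic_bool write_space;	/* stands in for XPRT_WRITE_SPACE */
	pthread_mutex_t lock;		/* stands in for transport_lock */
};

/* Returns true if a waiting writer should be woken. */
static bool write_space_sketch(struct wspace_sketch *x)
{
	bool ret = false;

	if (!atomic_load(&x->write_space))
		return false;		/* nothing waiting for write space */

	pthread_mutex_lock(&x->lock);
	if (atomic_exchange(&x->write_space, false)) {
		/* here the kernel wakes the next task queued on ->sending */
		ret = true;
	}
	pthread_mutex_unlock(&x->lock);
	return ret;
}
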
652 struct rpc_xprt *xprt = req->rq_xprt;
654 if (likely(xprt && xprt_connected(xprt)))
670 struct rpc_xprt *xprt = req->rq_xprt;
689 spin_lock(&xprt->transport_lock);
691 spin_unlock(&xprt->transport_lock);
705 struct rpc_xprt *xprt =
709 trace_xprt_disconnect_auto(xprt);
710 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
711 xprt->ops->close(xprt);
712 xprt_release_write(xprt, NULL);
713 wake_up_bit(&xprt->state, XPRT_LOCKED);
719 * @xprt: transport to flag for disconnect
722 void xprt_disconnect_done(struct rpc_xprt *xprt)
724 trace_xprt_disconnect_done(xprt);
725 spin_lock(&xprt->transport_lock);
726 xprt_clear_connected(xprt);
727 xprt_clear_write_space_locked(xprt);
728 xprt_clear_congestion_window_wait_locked(xprt);
729 xprt_wake_pending_tasks(xprt, -ENOTCONN);
730 spin_unlock(&xprt->transport_lock);
736 * @xprt: transport to disconnect
738 static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
740 if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
742 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
743 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
744 else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
745 rpc_wake_up_queued_task_set_status(&xprt->pending,
746 xprt->snd_task, -ENOTCONN);
751 * @xprt: transport to disconnect
754 void xprt_force_disconnect(struct rpc_xprt *xprt)
756 trace_xprt_disconnect_force(xprt);
759 spin_lock(&xprt->transport_lock);
760 xprt_schedule_autoclose_locked(xprt);
761 spin_unlock(&xprt->transport_lock);
766 xprt_connect_cookie(struct rpc_xprt *xprt)
768 return READ_ONCE(xprt->connect_cookie);
775 struct rpc_xprt *xprt = req->rq_xprt;
777 return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
778 !xprt_connected(xprt);
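
The fragments above decide whether a request must be retransmitted after a reconnect by comparing the connection cookie the request was sent under with the transport's current cookie, and by checking whether the transport is still connected. A small sketch of that check (names simplified):

/*
 * A changed connect cookie means the connection was re-established
 * since this request was transmitted, so its data may have been lost.
 */
#include <stdbool.h>
#include <stdint.h>

static bool need_retransmit_sketch(uint32_t rq_connect_cookie,
				   uint32_t xprt_connect_cookie,
				   bool connected)
{
	return rq_connect_cookie != xprt_connect_cookie || !connected;
}
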
783 * @xprt: transport to disconnect
792 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
795 spin_lock(&xprt->transport_lock);
796 if (cookie != xprt->connect_cookie)
798 if (test_bit(XPRT_CLOSING, &xprt->state))
800 xprt_schedule_autoclose_locked(xprt);
802 spin_unlock(&xprt->transport_lock);
806 xprt_has_timer(const struct rpc_xprt *xprt)
808 return xprt->idle_timeout != 0;
812 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
813 __must_hold(&xprt->transport_lock)
815 xprt->last_used = jiffies;
816 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
817 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
823 struct rpc_xprt *xprt = from_timer(xprt, t, timer);
825 if (!RB_EMPTY_ROOT(&xprt->recv_queue))
827 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
828 xprt->last_used = jiffies;
829 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
831 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
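
The autodisconnect fragments above arm an idle timer only when the transport has a nonzero idle_timeout and no requests are waiting for replies, and the timer handler refuses to fire while replies are still outstanding. A small sketch of the arming rule (user-space, with jiffies replaced by a plain counter):

/*
 * Decide when the idle-disconnect timer should fire, if at all.
 */
#include <stdbool.h>

struct idle_sketch {
	unsigned long last_used;	/* time of last activity ("jiffies") */
	unsigned long idle_timeout;	/* 0 disables autodisconnect */
	bool recv_queue_empty;		/* no replies outstanding */
};

/* Returns the absolute time to fire the idle timer, or 0 for "don't arm". */
static unsigned long autodisconnect_deadline(struct idle_sketch *x,
					     unsigned long now)
{
	x->last_used = now;
	if (!x->idle_timeout || !x->recv_queue_empty)
		return 0;
	return x->last_used + x->idle_timeout;
}
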
834 bool xprt_lock_connect(struct rpc_xprt *xprt,
840 spin_lock(&xprt->transport_lock);
841 if (!test_bit(XPRT_LOCKED, &xprt->state))
843 if (xprt->snd_task != task)
845 set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
846 xprt->snd_task = cookie;
849 spin_unlock(&xprt->transport_lock);
854 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
856 spin_lock(&xprt->transport_lock);
857 if (xprt->snd_task != cookie)
859 if (!test_bit(XPRT_LOCKED, &xprt->state))
861 xprt->snd_task = NULL;
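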
862 clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
863 xprt->ops->release_xprt(xprt, NULL);
864 xprt_schedule_autodisconnect(xprt);
866 spin_unlock(&xprt->transport_lock);
867 wake_up_bit(&xprt->state, XPRT_LOCKED);
878 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
880 trace_xprt_connect(xprt);
882 if (!xprt_bound(xprt)) {
886 if (!xprt_lock_write(xprt, task))
889 if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
890 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
891 rpc_sleep_on_timeout(&xprt->pending, task, NULL,
894 if (test_bit(XPRT_CLOSING, &xprt->state))
896 if (xprt_test_and_set_connecting(xprt))
899 if (!xprt_connected(xprt)) {
900 xprt->stat.connect_start = jiffies;
901 xprt->ops->connect(xprt, task);
903 xprt_clear_connecting(xprt);
905 rpc_wake_up_queued_task(&xprt->pending, task);
908 xprt_release_write(xprt, task);
913 * @xprt: transport instance
916 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
920 start = xprt->stat.connect_start + xprt->reestablish_timeout;
929 * @xprt: transport instance
933 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
935 xprt->reestablish_timeout <<= 1;
936 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
937 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
938 if (xprt->reestablish_timeout < init_to)
939 xprt->reestablish_timeout = init_to;
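
xprt_reconnect_backoff() above is a bounded exponential backoff on the reestablish timeout. Restated as a self-contained helper (names simplified):

/*
 * Double the reconnect delay on each failure, clamped between the
 * caller's initial value and the configured maximum.
 */
static unsigned long reconnect_backoff_sketch(unsigned long cur,
					      unsigned long init_to,
					      unsigned long max_to)
{
	cur <<= 1;		/* double the reestablish timeout */
	if (cur > max_to)
		cur = max_to;	/* never exceed the configured maximum */
	if (cur < init_to)
		cur = init_to;	/* never drop below the initial value */
	return cur;
}
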
959 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
961 struct rb_node *n = xprt->recv_queue.rb_node;
981 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
983 struct rb_node **p = &xprt->recv_queue.rb_node;
1003 rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1007 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1009 rb_erase(&req->rq_recv, &xprt->recv_queue);
1014 * @xprt: transport on which the original request was transmitted
1017 * Caller holds xprt->queue_lock.
1019 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1023 entry = xprt_request_rb_find(xprt, xid);
1025 trace_xprt_lookup_rqst(xprt, xid, 0);
1032 trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1033 xprt->stat.bad_xids++;
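
Requests waiting for replies are kept in a red-black tree keyed by XID (xprt_request_rb_insert/_find/_remove above), so xprt_lookup_rqst() can match an incoming reply in O(log n). As a conceptual stand-in only, the same lookup over a sorted array using bsearch(3):

/*
 * Find the pending request an incoming reply belongs to, by XID.
 * The kernel uses an rbtree; a sorted array illustrates the idea.
 */
#include <stdint.h>
#include <stdlib.h>

struct rqst_sketch {
	uint32_t rq_xid;	/* matches the reply's transaction id */
	/* ... request payload ... */
};

static int xid_cmp(const void *key, const void *elem)
{
	uint32_t xid = *(const uint32_t *)key;
	const struct rqst_sketch *req = elem;

	if (xid < req->rq_xid)
		return -1;
	return xid > req->rq_xid;
}

/* Returns the matching request, or NULL (a "bad XID" in the stats above). */
static struct rqst_sketch *lookup_rqst_sketch(struct rqst_sketch *reqs,
					      size_t nr, uint32_t xid)
{
	return bsearch(&xid, reqs, nr, sizeof(*reqs), xid_cmp);
}
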
1049 * so should be holding xprt->queue_lock.
1061 * Caller should be holding xprt->queue_lock.
1102 struct rpc_xprt *xprt = req->rq_xprt;
1108 spin_lock(&xprt->queue_lock);
1115 xprt_request_rb_insert(xprt, req);
1117 spin_unlock(&xprt->queue_lock);
1120 del_singleshot_timer_sync(&xprt->timer);
1127 * Caller must hold xprt->queue_lock.
1142 * Caller holds xprt->queue_lock.
1164 * Caller holds xprt->queue_lock.
1169 struct rpc_xprt *xprt = req->rq_xprt;
1171 xprt->stat.recvs++;
1179 rpc_wake_up_queued_task(&xprt->pending, task);
1186 struct rpc_xprt *xprt = req->rq_xprt;
1191 trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1193 if (xprt->ops->timer)
1194 xprt->ops->timer(xprt, task);
1250 struct rpc_xprt *xprt = req->rq_xprt;
1259 spin_lock(&xprt->queue_lock);
1261 xprt->ops->wait_for_reply_request(task);
1268 rpc_wake_up_queued_task_set_status(&xprt->pending,
1271 spin_unlock(&xprt->queue_lock);
1290 struct rpc_xprt *xprt = req->rq_xprt;
1294 spin_lock(&xprt->queue_lock);
1300 xprt_clear_congestion_window_wait(xprt);
1301 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1310 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1318 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1322 spin_unlock(&xprt->queue_lock);
1331 * Caller must hold xprt->queue_lock
1362 struct rpc_xprt *xprt = req->rq_xprt;
1364 spin_lock(&xprt->queue_lock);
1366 spin_unlock(&xprt->queue_lock);
1380 struct rpc_xprt *xprt = req->rq_xprt;
1385 spin_lock(&xprt->queue_lock);
1390 spin_unlock(&xprt->queue_lock);
1392 spin_lock(&xprt->queue_lock);
1395 spin_unlock(&xprt->queue_lock);
1409 struct rpc_xprt *xprt = req->rq_xprt;
1411 if (xprt->ops->prepare_request)
1412 xprt->ops->prepare_request(req);
1435 struct rpc_xprt *xprt = req->rq_xprt;
1437 if (!xprt_lock_write(xprt, task)) {
1440 rpc_wake_up_queued_task_set_status(&xprt->sending,
1450 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1452 xprt_inject_disconnect(xprt);
1453 xprt_release_write(xprt, task);
1469 struct rpc_xprt *xprt = req->rq_xprt;
1499 connect_cookie = xprt->connect_cookie;
1500 status = xprt->ops->send_request(req);
1510 xprt_inject_disconnect(xprt);
1513 spin_lock(&xprt->transport_lock);
1515 xprt->stat.sends++;
1516 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1517 xprt->stat.bklog_u += xprt->backlog.qlen;
1518 xprt->stat.sending_u += xprt->sending.qlen;
1519 xprt->stat.pending_u += xprt->pending.qlen;
1520 spin_unlock(&xprt->transport_lock);
1526 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1543 struct rpc_xprt *xprt = req->rq_xprt;
1546 spin_lock(&xprt->queue_lock);
1548 next = list_first_entry_or_null(&xprt->xmit_queue,
1553 spin_unlock(&xprt->queue_lock);
1557 spin_lock(&xprt->queue_lock);
1568 cond_resched_lock(&xprt->queue_lock);
1570 spin_unlock(&xprt->queue_lock);
1579 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1581 set_bit(XPRT_CONGESTED, &xprt->state);
1582 rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1598 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1600 if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1601 clear_bit(XPRT_CONGESTED, &xprt->state);
1608 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1612 if (!test_bit(XPRT_CONGESTED, &xprt->state))
1614 spin_lock(&xprt->reserve_lock);
1615 if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1616 xprt_add_backlog(xprt, task);
1619 spin_unlock(&xprt->reserve_lock);
1624 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1629 if (xprt->num_reqs >= xprt->max_reqs)
1631 ++xprt->num_reqs;
1632 spin_unlock(&xprt->reserve_lock);
1636 spin_lock(&xprt->reserve_lock);
1639 --xprt->num_reqs;
1645 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1647 if (xprt->num_reqs > xprt->min_reqs) {
1648 --xprt->num_reqs;
1655 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1659 spin_lock(&xprt->reserve_lock);
1660 if (!list_empty(&xprt->free)) {
1661 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1665 req = xprt_dynamic_alloc_slot(xprt);
1675 xprt_add_backlog(xprt, task);
1681 spin_unlock(&xprt->reserve_lock);
1684 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1685 xprt->num_reqs);
1686 spin_unlock(&xprt->reserve_lock);
1693 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1695 spin_lock(&xprt->reserve_lock);
1696 if (!xprt_wake_up_backlog(xprt, req) &&
1697 !xprt_dynamic_free_slot(xprt, req)) {
1699 list_add(&req->rq_list, &xprt->free);
1701 spin_unlock(&xprt->reserve_lock);
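
xprt_alloc_slot()/xprt_free_slot() above manage a free list of preallocated request slots, growing on demand up to max_reqs and shrinking back toward min_reqs on release. A simplified user-space sketch; locking, the free list itself, and the backlog queue are omitted:

/*
 * Dynamic slot pool between a preallocated floor and a hard ceiling.
 */
#include <stdlib.h>

struct slot_pool {
	unsigned int num_reqs;	/* currently allocated slots */
	unsigned int min_reqs;	/* preallocated floor */
	unsigned int max_reqs;	/* hard ceiling */
};

/* Allocate one more slot beyond the free list, if the ceiling allows. */
static void *dynamic_alloc_slot(struct slot_pool *p, size_t slot_size)
{
	void *slot;

	if (p->num_reqs >= p->max_reqs)
		return NULL;		/* caller must wait on the backlog */
	slot = calloc(1, slot_size);
	if (slot)
		p->num_reqs++;
	return slot;
}

/* Release a slot; only shrink while above the preallocated floor. */
static int dynamic_free_slot(struct slot_pool *p, void *slot)
{
	if (p->num_reqs > p->min_reqs) {
		p->num_reqs--;
		free(slot);
		return 1;		/* freed */
	}
	return 0;			/* keep it on the free list instead */
}
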
1705 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1708 while (!list_empty(&xprt->free)) {
1709 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1719 struct rpc_xprt *xprt;
1723 xprt = kzalloc(size, GFP_KERNEL);
1724 if (xprt == NULL)
1727 xprt_init(xprt, net);
1733 list_add(&req->rq_list, &xprt->free);
1736 xprt->max_reqs = max_alloc;
1738 xprt->max_reqs = num_prealloc;
1739 xprt->min_reqs = num_prealloc;
1740 xprt->num_reqs = num_prealloc;
1742 return xprt;
1745 xprt_free(xprt);
1751 void xprt_free(struct rpc_xprt *xprt)
1753 put_net(xprt->xprt_net);
1754 xprt_free_all_slots(xprt);
1755 kfree_rcu(xprt, rcu);
1760 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1762 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1766 xprt_alloc_xid(struct rpc_xprt *xprt)
1770 spin_lock(&xprt->reserve_lock);
1771 xid = (__force __be32)xprt->xid++;
1772 spin_unlock(&xprt->reserve_lock);
1777 xprt_init_xid(struct rpc_xprt *xprt)
1779 xprt->xid = prandom_u32();
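
xprt_init_xid()/xprt_alloc_xid() above seed the XID stream from a random value and hand out consecutive values, one per request. Minimal sketch, with rand(3) standing in for the kernel's prandom_u32():

/*
 * XID generation: random starting point, then monotonically increasing.
 */
#include <stdint.h>
#include <stdlib.h>

struct xid_gen { uint32_t next; };

static void init_xid_sketch(struct xid_gen *g)
{
	g->next = (uint32_t)rand();	/* prandom_u32() in the kernel */
}

static uint32_t alloc_xid_sketch(struct xid_gen *g)
{
	return g->next++;		/* one fresh XID per request */
}
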
1785 struct rpc_xprt *xprt = task->tk_xprt;
1789 req->rq_xprt = xprt;
1791 req->rq_xid = xprt_alloc_xid(xprt);
1792 xprt_init_connect_cookie(req, xprt);
1806 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1808 xprt->ops->alloc_slot(xprt, task);
1823 struct rpc_xprt *xprt = task->tk_xprt;
1830 if (!xprt_throttle_congested(xprt, task))
1831 xprt_do_reserve(xprt, task);
1845 struct rpc_xprt *xprt = task->tk_xprt;
1852 xprt_do_reserve(xprt, task);
1862 struct rpc_xprt *xprt;
1867 xprt = task->tk_xprt;
1868 xprt_release_write(xprt, task);
1873 xprt = req->rq_xprt;
1875 spin_lock(&xprt->transport_lock);
1876 xprt->ops->release_xprt(xprt, task);
1877 if (xprt->ops->release_request)
1878 xprt->ops->release_request(task);
1879 xprt_schedule_autodisconnect(xprt);
1880 spin_unlock(&xprt->transport_lock);
1882 xprt->ops->buf_free(task);
1892 xprt->ops->free_slot(xprt, req);
1915 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1917 kref_init(&xprt->kref);
1919 spin_lock_init(&xprt->transport_lock);
1920 spin_lock_init(&xprt->reserve_lock);
1921 spin_lock_init(&xprt->queue_lock);
1923 INIT_LIST_HEAD(&xprt->free);
1924 xprt->recv_queue = RB_ROOT;
1925 INIT_LIST_HEAD(&xprt->xmit_queue);
1927 spin_lock_init(&xprt->bc_pa_lock);
1928 INIT_LIST_HEAD(&xprt->bc_pa_list);
1930 INIT_LIST_HEAD(&xprt->xprt_switch);
1932 xprt->last_used = jiffies;
1933 xprt->cwnd = RPC_INITCWND;
1934 xprt->bind_index = 0;
1936 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1937 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1938 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1939 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1941 xprt_init_xid(xprt);
1943 xprt->xprt_net = get_net(net);
1953 struct rpc_xprt *xprt;
1968 xprt = t->setup(args);
1969 if (IS_ERR(xprt))
1972 xprt->idle_timeout = 0;
1973 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1974 if (xprt_has_timer(xprt))
1975 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1977 timer_setup(&xprt->timer, NULL, 0);
1980 xprt_destroy(xprt);
1983 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1984 if (xprt->servername == NULL) {
1985 xprt_destroy(xprt);
1989 rpc_xprt_debugfs_register(xprt);
1991 trace_xprt_create(xprt);
1993 return xprt;
1998 struct rpc_xprt *xprt =
2001 trace_xprt_destroy(xprt);
2003 rpc_xprt_debugfs_unregister(xprt);
2004 rpc_destroy_wait_queue(&xprt->binding);
2005 rpc_destroy_wait_queue(&xprt->pending);
2006 rpc_destroy_wait_queue(&xprt->sending);
2007 rpc_destroy_wait_queue(&xprt->backlog);
2008 kfree(xprt->servername);
2012 xprt_destroy_backchannel(xprt, UINT_MAX);
2017 xprt->ops->destroy(xprt);
2022 * @xprt: transport to destroy
2025 static void xprt_destroy(struct rpc_xprt *xprt)
2030 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2037 spin_lock(&xprt->transport_lock);
2038 del_timer_sync(&xprt->timer);
2039 spin_unlock(&xprt->transport_lock);
2045 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2046 schedule_work(&xprt->task_cleanup);
2056 * @xprt: pointer to the transport
2059 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2061 if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2062 return xprt;
2069 * @xprt: pointer to the transport
2072 void xprt_put(struct rpc_xprt *xprt)
2074 if (xprt != NULL)
2075 kref_put(&xprt->kref, xprt_destroy_kref);
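
xprt_get()/xprt_put() above are plain kref reference counting: a "get unless already zero" acquire and a put that destroys the transport when the last reference drops. A user-space analogue with C11 atomics, illustrative only (the kernel uses kref_get_unless_zero()/kref_put()):

/*
 * Refcounting with a conditional get: once the count has hit zero the
 * object is being torn down and may no longer be acquired.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct ref_sketch { atomic_int refs; };

static bool ref_get_unless_zero(struct ref_sketch *r)
{
	int old = atomic_load(&r->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&r->refs, &old, old + 1))
			return true;	/* took a reference */
	}
	return false;			/* object already being destroyed */
}

static void ref_put(struct ref_sketch *r, void (*release)(struct ref_sketch *))
{
	if (atomic_fetch_sub(&r->refs, 1) == 1)
		release(r);		/* last reference dropped */
}
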