Lines Matching refs:xprt

3  *  linux/net/sunrpc/xprt.c
72 static void xprt_init(struct rpc_xprt *xprt, struct net *net);
73 static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
74 static void xprt_destroy(struct rpc_xprt *xprt);
247 static void xprt_clear_locked(struct rpc_xprt *xprt)
249 xprt->snd_task = NULL;
250 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
251 clear_bit_unlock(XPRT_LOCKED, &xprt->state);
253 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
259 * @xprt: pointer to the target transport
265 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
269 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
270 if (task == xprt->snd_task)
274 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
276 xprt->snd_task = task;
279 trace_xprt_reserve_xprt(xprt, task);
283 xprt_clear_locked(xprt);
287 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
290 rpc_sleep_on(&xprt->sending, task, NULL);
296 xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
298 return test_bit(XPRT_CWND_WAIT, &xprt->state);
302 xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
304 if (!list_empty(&xprt->xmit_queue)) {
306 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
310 set_bit(XPRT_CWND_WAIT, &xprt->state);
314 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
316 if (!RPCXPRT_CONGESTED(xprt))
317 clear_bit(XPRT_CWND_WAIT, &xprt->state);
329 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
333 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
334 if (task == xprt->snd_task)
339 xprt->snd_task = task;
342 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
344 if (!xprt_need_congestion_window_wait(xprt)) {
345 xprt->snd_task = task;
349 xprt_clear_locked(xprt);
353 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
356 rpc_sleep_on(&xprt->sending, task, NULL);
359 trace_xprt_reserve_cong(xprt, task);
364 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
368 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
370 spin_lock(&xprt->transport_lock);
371 retval = xprt->ops->reserve_xprt(xprt, task);
372 spin_unlock(&xprt->transport_lock);
378 struct rpc_xprt *xprt = data;
380 xprt->snd_task = task;
384 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
386 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
388 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
390 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
391 __xprt_lock_write_func, xprt))
394 xprt_clear_locked(xprt);
397 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
399 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
401 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
403 if (xprt_need_congestion_window_wait(xprt))
405 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
406 __xprt_lock_write_func, xprt))
409 xprt_clear_locked(xprt);
414 * @xprt: transport with other tasks potentially waiting
419 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
421 if (xprt->snd_task == task) {
422 xprt_clear_locked(xprt);
423 __xprt_lock_write_next(xprt);
425 trace_xprt_release_xprt(xprt, task);
431 * @xprt: transport with other tasks potentially waiting
437 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
439 if (xprt->snd_task == task) {
440 xprt_clear_locked(xprt);
441 __xprt_lock_write_next_cong(xprt);
443 trace_xprt_release_cong(xprt, task);
447 void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
449 if (xprt->snd_task != task)
451 spin_lock(&xprt->transport_lock);
452 xprt->ops->release_xprt(xprt, task);
453 spin_unlock(&xprt->transport_lock);
461 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
465 trace_xprt_get_cong(xprt, req->rq_task);
466 if (RPCXPRT_CONGESTED(xprt)) {
467 xprt_set_congestion_window_wait(xprt);
471 xprt->cong += RPC_CWNDSCALE;
480 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
485 xprt->cong -= RPC_CWNDSCALE;
486 xprt_test_and_clear_congestion_window_wait(xprt);
487 trace_xprt_put_cong(xprt, req->rq_task);
488 __xprt_lock_write_next_cong(xprt);
493 * @xprt: pointer to transport
499 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
505 spin_lock(&xprt->transport_lock);
506 ret = __xprt_get_cong(xprt, req) != 0;
507 spin_unlock(&xprt->transport_lock);
526 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
528 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
529 __xprt_lock_write_next_cong(xprt);
534 * entry on xprt->sending
537 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
539 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
540 spin_lock(&xprt->transport_lock);
541 __xprt_lock_write_next_cong(xprt);
542 spin_unlock(&xprt->transport_lock);
548 * @xprt: pointer to xprt
562 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
565 unsigned long cwnd = xprt->cwnd;
567 if (result >= 0 && cwnd <= xprt->cong) {
571 if (cwnd > RPC_MAXCWND(xprt))
572 cwnd = RPC_MAXCWND(xprt);
573 __xprt_lock_write_next_cong(xprt);
580 xprt->cong, xprt->cwnd, cwnd);
581 xprt->cwnd = cwnd;
582 __xprt_put_cong(xprt, req);
588 * @xprt: transport with waiting tasks
592 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
595 rpc_wake_up_status(&xprt->pending, status);
597 rpc_wake_up(&xprt->pending);
603 * @xprt: transport
609 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
611 set_bit(XPRT_WRITE_SPACE, &xprt->state);
616 xprt_clear_write_space_locked(struct rpc_xprt *xprt)
618 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
619 __xprt_lock_write_next(xprt);
621 "xprt %p\n", xprt);
629 * @xprt: transport with waiting tasks
633 bool xprt_write_space(struct rpc_xprt *xprt)
637 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
639 spin_lock(&xprt->transport_lock);
640 ret = xprt_clear_write_space_locked(xprt);
641 spin_unlock(&xprt->transport_lock);
681 struct rpc_xprt *xprt = req->rq_xprt;
683 if (likely(xprt && xprt_connected(xprt)))
699 struct rpc_xprt *xprt = req->rq_xprt;
718 spin_lock(&xprt->transport_lock);
720 spin_unlock(&xprt->transport_lock);
734 struct rpc_xprt *xprt =
738 trace_xprt_disconnect_auto(xprt);
739 xprt->connect_cookie++;
741 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
742 xprt->ops->close(xprt);
743 xprt_release_write(xprt, NULL);
744 wake_up_bit(&xprt->state, XPRT_LOCKED);
750 * @xprt: transport to flag for disconnect
753 void xprt_disconnect_done(struct rpc_xprt *xprt)
755 trace_xprt_disconnect_done(xprt);
756 spin_lock(&xprt->transport_lock);
757 xprt_clear_connected(xprt);
758 xprt_clear_write_space_locked(xprt);
759 xprt_clear_congestion_window_wait_locked(xprt);
760 xprt_wake_pending_tasks(xprt, -ENOTCONN);
761 spin_unlock(&xprt->transport_lock);
767 * @xprt: transport to disconnect
769 static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
771 if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
773 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
774 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
775 else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
776 rpc_wake_up_queued_task_set_status(&xprt->pending,
777 xprt->snd_task, -ENOTCONN);
782 * @xprt: transport to disconnect
785 void xprt_force_disconnect(struct rpc_xprt *xprt)
787 trace_xprt_disconnect_force(xprt);
790 spin_lock(&xprt->transport_lock);
791 xprt_schedule_autoclose_locked(xprt);
792 spin_unlock(&xprt->transport_lock);
797 xprt_connect_cookie(struct rpc_xprt *xprt)
799 return READ_ONCE(xprt->connect_cookie);
806 struct rpc_xprt *xprt = req->rq_xprt;
808 return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
809 !xprt_connected(xprt);
814 * @xprt: transport to disconnect
823 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
826 spin_lock(&xprt->transport_lock);
827 if (cookie != xprt->connect_cookie)
829 if (test_bit(XPRT_CLOSING, &xprt->state))
831 xprt_schedule_autoclose_locked(xprt);
833 spin_unlock(&xprt->transport_lock);
837 xprt_has_timer(const struct rpc_xprt *xprt)
839 return xprt->idle_timeout != 0;
843 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
844 __must_hold(&xprt->transport_lock)
846 xprt->last_used = jiffies;
847 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
848 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
854 struct rpc_xprt *xprt = from_timer(xprt, t, timer);
856 if (!RB_EMPTY_ROOT(&xprt->recv_queue))
858 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
859 xprt->last_used = jiffies;
860 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
862 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
866 static void xprt_inject_disconnect(struct rpc_xprt *xprt)
870 xprt->ops->inject_disconnect(xprt);
873 static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
878 bool xprt_lock_connect(struct rpc_xprt *xprt,
884 spin_lock(&xprt->transport_lock);
885 if (!test_bit(XPRT_LOCKED, &xprt->state))
887 if (xprt->snd_task != task)
889 set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
890 xprt->snd_task = cookie;
893 spin_unlock(&xprt->transport_lock);
898 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
900 spin_lock(&xprt->transport_lock);
901 if (xprt->snd_task != cookie)
903 if (!test_bit(XPRT_LOCKED, &xprt->state))
905 xprt->snd_task = NULL;
906 clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
907 xprt->ops->release_xprt(xprt, NULL);
908 xprt_schedule_autodisconnect(xprt);
910 spin_unlock(&xprt->transport_lock);
911 wake_up_bit(&xprt->state, XPRT_LOCKED);
922 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
924 trace_xprt_connect(xprt);
926 if (!xprt_bound(xprt)) {
930 if (!xprt_lock_write(xprt, task))
933 if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
934 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
935 rpc_sleep_on_timeout(&xprt->pending, task, NULL,
938 if (test_bit(XPRT_CLOSING, &xprt->state))
940 if (xprt_test_and_set_connecting(xprt))
943 if (!xprt_connected(xprt)) {
944 xprt->stat.connect_start = jiffies;
945 xprt->ops->connect(xprt, task);
947 xprt_clear_connecting(xprt);
949 rpc_wake_up_queued_task(&xprt->pending, task);
952 xprt_release_write(xprt, task);
957 * @xprt: transport instance
960 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
964 start = xprt->stat.connect_start + xprt->reestablish_timeout;
973 * @xprt: transport instance
977 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
979 xprt->reestablish_timeout <<= 1;
980 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
981 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
982 if (xprt->reestablish_timeout < init_to)
983 xprt->reestablish_timeout = init_to;
1003 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
1005 struct rb_node *n = xprt->recv_queue.rb_node;
1025 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1027 struct rb_node **p = &xprt->recv_queue.rb_node;
1047 rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1051 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1053 rb_erase(&req->rq_recv, &xprt->recv_queue);
1058 * @xprt: transport on which the original request was transmitted
1061 * Caller holds xprt->queue_lock.
1063 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1067 entry = xprt_request_rb_find(xprt, xid);
1069 trace_xprt_lookup_rqst(xprt, xid, 0);
1076 trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1077 xprt->stat.bad_xids++;
1093 * so should be holding xprt->queue_lock.
1105 * Caller should be holding xprt->queue_lock.
1146 struct rpc_xprt *xprt = req->rq_xprt;
1155 spin_lock(&xprt->queue_lock);
1162 xprt_request_rb_insert(xprt, req);
1164 spin_unlock(&xprt->queue_lock);
1167 del_timer_sync(&xprt->timer);
1175 * Caller must hold xprt->queue_lock.
1190 * Caller holds xprt->queue_lock.
1212 * Caller holds xprt->queue_lock.
1217 struct rpc_xprt *xprt = req->rq_xprt;
1219 xprt->stat.recvs++;
1229 rpc_wake_up_queued_task(&xprt->pending, task);
1236 struct rpc_xprt *xprt = req->rq_xprt;
1241 trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1243 if (xprt->ops->timer)
1244 xprt->ops->timer(xprt, task);
1300 struct rpc_xprt *xprt = req->rq_xprt;
1309 spin_lock(&xprt->queue_lock);
1311 xprt->ops->wait_for_reply_request(task);
1318 rpc_wake_up_queued_task_set_status(&xprt->pending,
1321 spin_unlock(&xprt->queue_lock);
1340 struct rpc_xprt *xprt = req->rq_xprt;
1350 spin_lock(&xprt->queue_lock);
1356 xprt_clear_congestion_window_wait(xprt);
1357 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1366 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1374 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1377 atomic_long_inc(&xprt->xmit_queuelen);
1379 spin_unlock(&xprt->queue_lock);
1388 * Caller must hold xprt->queue_lock
1421 struct rpc_xprt *xprt = req->rq_xprt;
1423 spin_lock(&xprt->queue_lock);
1425 spin_unlock(&xprt->queue_lock);
1439 struct rpc_xprt *xprt = req->rq_xprt;
1444 spin_lock(&xprt->queue_lock);
1447 spin_unlock(&xprt->queue_lock);
1449 spin_lock(&xprt->queue_lock);
1454 spin_unlock(&xprt->queue_lock);
1471 struct rpc_xprt *xprt = req->rq_xprt;
1473 if (xprt->ops->prepare_request)
1474 return xprt->ops->prepare_request(req, buf);
1498 struct rpc_xprt *xprt = req->rq_xprt;
1500 if (!xprt_lock_write(xprt, task)) {
1503 rpc_wake_up_queued_task_set_status(&xprt->sending,
1508 if (atomic_read(&xprt->swapper))
1516 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1518 xprt_inject_disconnect(xprt);
1519 xprt_release_write(xprt, task);
1535 struct rpc_xprt *xprt = req->rq_xprt;
1565 connect_cookie = xprt->connect_cookie;
1566 status = xprt->ops->send_request(req);
1578 xprt_inject_disconnect(xprt);
1581 spin_lock(&xprt->transport_lock);
1583 xprt->stat.sends++;
1584 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1585 xprt->stat.bklog_u += xprt->backlog.qlen;
1586 xprt->stat.sending_u += xprt->sending.qlen;
1587 xprt->stat.pending_u += xprt->pending.qlen;
1588 spin_unlock(&xprt->transport_lock);
1594 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1611 struct rpc_xprt *xprt = req->rq_xprt;
1614 spin_lock(&xprt->queue_lock);
1616 next = list_first_entry_or_null(&xprt->xmit_queue,
1621 spin_unlock(&xprt->queue_lock);
1625 spin_lock(&xprt->queue_lock);
1636 cond_resched_lock(&xprt->queue_lock);
1638 spin_unlock(&xprt->queue_lock);
1647 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1649 set_bit(XPRT_CONGESTED, &xprt->state);
1650 rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1666 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1668 if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1669 clear_bit(XPRT_CONGESTED, &xprt->state);
1676 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1680 if (!test_bit(XPRT_CONGESTED, &xprt->state))
1682 spin_lock(&xprt->reserve_lock);
1683 if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1684 xprt_add_backlog(xprt, task);
1687 spin_unlock(&xprt->reserve_lock);
1692 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1696 if (xprt->num_reqs >= xprt->max_reqs)
1698 ++xprt->num_reqs;
1699 spin_unlock(&xprt->reserve_lock);
1701 spin_lock(&xprt->reserve_lock);
1704 --xprt->num_reqs;
1710 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1712 if (xprt->num_reqs > xprt->min_reqs) {
1713 --xprt->num_reqs;
1720 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1724 spin_lock(&xprt->reserve_lock);
1725 if (!list_empty(&xprt->free)) {
1726 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1730 req = xprt_dynamic_alloc_slot(xprt);
1740 xprt_add_backlog(xprt, task);
1746 spin_unlock(&xprt->reserve_lock);
1749 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1750 xprt->num_reqs);
1751 spin_unlock(&xprt->reserve_lock);
1758 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1760 spin_lock(&xprt->reserve_lock);
1761 if (!xprt_wake_up_backlog(xprt, req) &&
1762 !xprt_dynamic_free_slot(xprt, req)) {
1764 list_add(&req->rq_list, &xprt->free);
1766 spin_unlock(&xprt->reserve_lock);
1770 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1773 while (!list_empty(&xprt->free)) {
1774 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1787 static int xprt_alloc_id(struct rpc_xprt *xprt)
1795 xprt->id = id;
1799 static void xprt_free_id(struct rpc_xprt *xprt)
1801 ida_free(&rpc_xprt_ids, xprt->id);
1808 struct rpc_xprt *xprt;
1812 xprt = kzalloc(size, GFP_KERNEL);
1813 if (xprt == NULL)
1816 xprt_alloc_id(xprt);
1817 xprt_init(xprt, net);
1823 list_add(&req->rq_list, &xprt->free);
1825 xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
1826 xprt->min_reqs = num_prealloc;
1827 xprt->num_reqs = num_prealloc;
1829 return xprt;
1832 xprt_free(xprt);
1838 void xprt_free(struct rpc_xprt *xprt)
1840 put_net_track(xprt->xprt_net, &xprt->ns_tracker);
1841 xprt_free_all_slots(xprt);
1842 xprt_free_id(xprt);
1843 rpc_sysfs_xprt_destroy(xprt);
1844 kfree_rcu(xprt, rcu);
1849 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1851 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1855 xprt_alloc_xid(struct rpc_xprt *xprt)
1859 spin_lock(&xprt->reserve_lock);
1860 xid = (__force __be32)xprt->xid++;
1861 spin_unlock(&xprt->reserve_lock);
1866 xprt_init_xid(struct rpc_xprt *xprt)
1868 xprt->xid = get_random_u32();
1874 struct rpc_xprt *xprt = task->tk_xprt;
1878 req->rq_xprt = xprt;
1880 req->rq_xid = xprt_alloc_xid(xprt);
1881 xprt_init_connect_cookie(req, xprt);
1895 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1897 xprt->ops->alloc_slot(xprt, task);
1912 struct rpc_xprt *xprt = task->tk_xprt;
1919 if (!xprt_throttle_congested(xprt, task))
1920 xprt_do_reserve(xprt, task);
1934 struct rpc_xprt *xprt = task->tk_xprt;
1941 xprt_do_reserve(xprt, task);
1951 struct rpc_xprt *xprt;
1956 xprt = task->tk_xprt;
1957 xprt_release_write(xprt, task);
1962 xprt = req->rq_xprt;
1964 spin_lock(&xprt->transport_lock);
1965 xprt->ops->release_xprt(xprt, task);
1966 if (xprt->ops->release_request)
1967 xprt->ops->release_request(task);
1968 xprt_schedule_autodisconnect(xprt);
1969 spin_unlock(&xprt->transport_lock);
1971 xprt->ops->buf_free(task);
1979 xprt->ops->free_slot(xprt, req);
2002 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
2004 kref_init(&xprt->kref);
2006 spin_lock_init(&xprt->transport_lock);
2007 spin_lock_init(&xprt->reserve_lock);
2008 spin_lock_init(&xprt->queue_lock);
2010 INIT_LIST_HEAD(&xprt->free);
2011 xprt->recv_queue = RB_ROOT;
2012 INIT_LIST_HEAD(&xprt->xmit_queue);
2014 spin_lock_init(&xprt->bc_pa_lock);
2015 INIT_LIST_HEAD(&xprt->bc_pa_list);
2017 INIT_LIST_HEAD(&xprt->xprt_switch);
2019 xprt->last_used = jiffies;
2020 xprt->cwnd = RPC_INITCWND;
2021 xprt->bind_index = 0;
2023 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
2024 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
2025 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
2026 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
2028 xprt_init_xid(xprt);
2030 xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL);
2040 struct rpc_xprt *xprt;
2049 xprt = t->setup(args);
2052 if (IS_ERR(xprt))
2055 xprt->idle_timeout = 0;
2056 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
2057 if (xprt_has_timer(xprt))
2058 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
2060 timer_setup(&xprt->timer, NULL, 0);
2063 xprt_destroy(xprt);
2066 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
2067 if (xprt->servername == NULL) {
2068 xprt_destroy(xprt);
2072 rpc_xprt_debugfs_register(xprt);
2074 trace_xprt_create(xprt);
2076 return xprt;
2081 struct rpc_xprt *xprt =
2084 trace_xprt_destroy(xprt);
2086 rpc_xprt_debugfs_unregister(xprt);
2087 rpc_destroy_wait_queue(&xprt->binding);
2088 rpc_destroy_wait_queue(&xprt->pending);
2089 rpc_destroy_wait_queue(&xprt->sending);
2090 rpc_destroy_wait_queue(&xprt->backlog);
2091 kfree(xprt->servername);
2095 xprt_destroy_backchannel(xprt, UINT_MAX);
2100 xprt->ops->destroy(xprt);
2105 * @xprt: transport to destroy
2108 static void xprt_destroy(struct rpc_xprt *xprt)
2113 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2120 spin_lock(&xprt->transport_lock);
2121 del_timer_sync(&xprt->timer);
2122 spin_unlock(&xprt->transport_lock);
2128 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2129 schedule_work(&xprt->task_cleanup);
2139 * @xprt: pointer to the transport
2142 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2144 if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2145 return xprt;
2152 * @xprt: pointer to the transport
2155 void xprt_put(struct rpc_xprt *xprt)
2157 if (xprt != NULL)
2158 kref_put(&xprt->kref, xprt_destroy_kref);
2162 void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2164 if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
2171 void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2173 if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
2180 void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2182 if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
2185 xprt_force_disconnect(xprt);
2186 if (!test_bit(XPRT_CONNECTED, &xprt->state))
2189 if (!xprt->sending.qlen && !xprt->pending.qlen &&
2190 !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
2191 rpc_xprt_switch_remove_xprt(xps, xprt, true);
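
The matches above cluster around one pattern: a single XPRT_LOCKED bit plus the xprt->snd_task pointer record which task owns the transport for sending, contenders sleep on the xprt->sending queue, and the releaser hands the lock to the next waiter. The fragment below is a minimal illustrative sketch of that pattern, not the kernel code; it borrows the matched identifiers but simplifies the bodies and omits the congestion-window, write-space, and cookie handling that the real xprt_reserve_xprt()/xprt_release_xprt() perform.

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>

/* Simplified take on xprt_reserve_xprt(): try to become the sender. */
static int demo_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;	/* already own the write lock */
		/* lock busy: wait on the sending queue until woken */
		rpc_sleep_on(&xprt->sending, task, NULL);
		return 0;
	}
	xprt->snd_task = task;		/* record ownership */
	return 1;
}

/* Simplified take on xprt_release_xprt(): drop the lock, wake a waiter. */
static void demo_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	xprt->snd_task = NULL;
	clear_bit_unlock(XPRT_LOCKED, &xprt->state);
	/* hand the transport to the next task queued on xprt->sending */
	rpc_wake_up_next(&xprt->sending);
}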