Lines Matching defs:clt_path
54 struct rtrs_clt_path *clt_path;
58 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
59 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
173 * @clt_path: client path pointer
180 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
186 id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
188 return to_clt_con(clt_path->s.con[id]);
195 * @clt_path: client path to change the state of.
203 static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
209 lockdep_assert_held(&clt_path->state_wq.lock);
211 old_state = clt_path->state;
285 clt_path->state = new_state;
286 wake_up_locked(&clt_path->state_wq);
292 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
298 spin_lock_irq(&clt_path->state_wq.lock);
299 if (clt_path->state == old_state)
300 changed = rtrs_clt_change_state(clt_path, new_state);
301 spin_unlock_irq(&clt_path->state_wq.lock);
306 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
311 trace_rtrs_rdma_error_recovery(clt_path);
313 if (rtrs_clt_change_state_from_to(clt_path,
316 queue_work(rtrs_wq, &clt_path->err_recovery_work);
323 rtrs_clt_change_state_from_to(clt_path,
384 struct rtrs_clt_path *clt_path;
391 clt_path = to_clt_path(con->c.path);
437 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
443 atomic_dec(&clt_path->stats->inflight);
450 errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
451 clt_path->hca_port, notify);
463 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
476 sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
482 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
485 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
494 static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
499 if (WARN_ON(msg_id >= clt_path->queue_depth))
502 req = &clt_path->reqs[msg_id];
512 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
514 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
526 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
534 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
543 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
547 rtrs_err(clt_path->clt,
553 if (WARN_ON(buf_id >= clt_path->queue_depth))
566 clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
567 process_io_rsp(clt_path, msg_id, err, w_inval);
569 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
606 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
613 rtrs_err(clt_path->clt, "RDMA failed: %s\n",
638 process_io_rsp(clt_path, msg_id, err, w_inval);
641 rtrs_send_hb_ack(&clt_path->s);
642 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
646 clt_path->s.hb_missed_cnt = 0;
647 clt_path->s.hb_cur_latency =
648 ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
649 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
676 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
691 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
699 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
702 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
716 static int post_recv_path(struct rtrs_clt_path *clt_path)
721 for (cid = 0; cid < clt_path->s.con_num; cid++) {
725 q_size = clt_path->queue_depth;
733 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
735 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
754 * @clt_path: The element to take the next clt_path from.
763 rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
765 return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
767 READ_ONCE((&clt_path->s.entry)->next),
768 typeof(*clt_path), s.entry);
820 struct rtrs_clt_path *clt_path;
824 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
825 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
828 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
831 inflight = atomic_read(&clt_path->stats->inflight);
835 min_path = clt_path;
871 struct rtrs_clt_path *clt_path;
875 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
876 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
879 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
882 latency = clt_path->s.hb_cur_latency;
886 min_path = clt_path;
933 * @clt_path: client path
945 struct rtrs_clt_path *clt_path,
963 req->con = rtrs_permit_to_clt_con(clt_path, permit);
969 req->mp_policy = clt_path->clt->mp_policy;
979 rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
988 req = &clt_path->reqs[permit->mem_id];
989 rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
1019 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1039 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1045 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
1051 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1054 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
1080 struct rtrs_clt_path *clt_path = to_clt_path(s);
1093 if (tsize > clt_path->chunk_size) {
1095 tsize, clt_path->chunk_size);
1099 count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
1116 rbuf = &clt_path->rbufs[buf_id];
1124 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1158 ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1159 clt_path->hca_port);
1161 atomic_dec(&clt_path->stats->inflight);
1163 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1174 struct rtrs_clt_path *clt_path = to_clt_path(s);
1176 struct rtrs_ib_dev *dev = clt_path->s.dev;
1186 if (tsize > clt_path->chunk_size) {
1189 tsize, clt_path->chunk_size);
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1264 ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1265 clt_path->hca_port);
1267 atomic_dec(&clt_path->stats->inflight);
1315 static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
1317 struct rtrs_clt_sess *clt = clt_path->clt;
1321 if (!clt_path->reqs)
1323 for (i = 0; i < clt_path->queue_depth; ++i) {
1324 req = &clt_path->reqs[i];
1342 static void free_path_reqs(struct rtrs_clt_path *clt_path)
1347 if (!clt_path->reqs)
1349 for (i = 0; i < clt_path->queue_depth; ++i) {
1350 req = &clt_path->reqs[i];
1354 rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
1356 kfree(clt_path->reqs);
1357 clt_path->reqs = NULL;
1360 static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
1365 clt_path->reqs = kcalloc(clt_path->queue_depth,
1366 sizeof(*clt_path->reqs),
1368 if (!clt_path->reqs)
1371 for (i = 0; i < clt_path->queue_depth; ++i) {
1372 req = &clt_path->reqs[i];
1373 req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
1374 clt_path->s.dev->ib_dev,
1384 req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
1386 clt_path->max_pages_per_mr);
1390 pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
1391 clt_path->max_pages_per_mr);
1401 free_path_reqs(clt_path);
1451 static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
1457 ib_dev = clt_path->s.dev->ib_dev;
1467 clt_path->max_pages_per_mr =
1468 min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
1470 clt_path->clt->max_segments =
1471 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
1474 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
1480 spin_lock_irq(&clt_path->state_wq.lock);
1482 *old_state = clt_path->state;
1483 changed = rtrs_clt_change_state(clt_path, new_state);
1484 spin_unlock_irq(&clt_path->state_wq.lock);
1496 static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
1498 rtrs_init_hb(&clt_path->s, &io_comp_cqe,
1510 struct rtrs_clt_path *clt_path;
1514 clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
1515 clt = clt_path->clt;
1517 rtrs_clt_stop_and_destroy_conns(clt_path);
1518 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
1527 struct rtrs_clt_path *clt_path;
1532 clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
1533 if (!clt_path)
1541 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
1543 if (!clt_path->s.con)
1546 clt_path->s.con_num = total_con;
1547 clt_path->s.irq_con_num = con_num + 1;
1549 clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
1550 if (!clt_path->stats)
1553 mutex_init(&clt_path->init_mutex);
1554 uuid_gen(&clt_path->s.uuid);
1555 memcpy(&clt_path->s.dst_addr, path->dst,
1564 memcpy(&clt_path->s.src_addr, path->src,
1566 strscpy(clt_path->s.sessname, clt->sessname,
1567 sizeof(clt_path->s.sessname));
1568 clt_path->clt = clt;
1569 clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
1570 init_waitqueue_head(&clt_path->state_wq);
1571 clt_path->state = RTRS_CLT_CONNECTING;
1572 atomic_set(&clt_path->connected_cnt, 0);
1573 INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
1574 INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
1575 INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
1576 rtrs_clt_init_hb(clt_path);
1578 clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
1579 if (!clt_path->mp_skip_entry)
1583 INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
1585 err = rtrs_clt_init_stats(clt_path->stats);
1589 return clt_path;
1592 free_percpu(clt_path->mp_skip_entry);
1594 kfree(clt_path->stats);
1596 kfree(clt_path->s.con);
1598 kfree(clt_path);
1603 void free_path(struct rtrs_clt_path *clt_path)
1605 free_percpu(clt_path->mp_skip_entry);
1606 mutex_destroy(&clt_path->init_mutex);
1607 kfree(clt_path->s.con);
1608 kfree(clt_path->rbufs);
1609 kfree(clt_path);
1612 static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
1623 con->c.path = &clt_path->s;
1628 clt_path->s.con[cid] = &con->c;
1635 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1637 clt_path->s.con[con->c.cid] = NULL;
1644 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1653 if (WARN_ON(clt_path->s.dev))
1661 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1663 if (!clt_path->s.dev) {
1664 rtrs_wrn(clt_path->clt,
1668 clt_path->s.dev_ref = 1;
1669 query_fast_reg_mode(clt_path);
1670 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1687 if (WARN_ON(!clt_path->s.dev))
1689 if (WARN_ON(!clt_path->queue_depth))
1692 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1694 clt_path->s.dev_ref++;
1697 clt_path->queue_depth * 4 + 1);
1699 clt_path->queue_depth * 3 + 1);
1705 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1708 clt_path->s.dev->ib_dev,
1715 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
1716 if (con->c.cid >= clt_path->s.irq_con_num)
1717 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1721 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1733 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1742 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1747 if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
1748 rtrs_ib_dev_put(clt_path->s.dev);
1749 clt_path->s.dev = NULL;
1787 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1788 struct rtrs_clt_sess *clt = clt_path->clt;
1805 .cid_num = cpu_to_le16(clt_path->s.con_num),
1806 .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
1808 msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
1809 uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
1822 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1823 struct rtrs_clt_sess *clt = clt_path->clt;
1854 if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
1860 clt_path->reconnect_attempts = -1;
1866 if (!clt_path->rbufs) {
1867 clt_path->rbufs = kcalloc(queue_depth,
1868 sizeof(*clt_path->rbufs),
1870 if (!clt_path->rbufs)
1873 clt_path->queue_depth = queue_depth;
1874 clt_path->s.signal_interval = min_not_zero(queue_depth,
1876 clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1877 clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
1878 clt_path->flags = le32_to_cpu(msg->flags);
1879 clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
1890 clt->queue_depth = clt_path->queue_depth;
1891 clt->max_io_size = min_not_zero(clt_path->max_io_size,
1898 clt_path->hca_port = con->c.cm_id->port_num;
1899 scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
1900 clt_path->s.dev->ib_dev->name);
1901 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1903 clt_path->for_new_clt = 1;
1911 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1913 atomic_inc(&clt_path->connected_cnt);
1948 void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
1950 trace_rtrs_clt_close_conns(clt_path);
1952 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
1953 queue_work(rtrs_wq, &clt_path->close_work);
1955 flush_work(&clt_path->close_work);
1961 struct rtrs_clt_path *clt_path;
1963 clt_path = to_clt_path(con->c.path);
1964 if (atomic_dec_and_test(&clt_path->connected_cnt))
1966 wake_up(&clt_path->state_wq);
1976 struct rtrs_clt_path *clt_path = to_clt_path(s);
1994 wake_up(&clt_path->state_wq);
2023 rtrs_clt_close_conns(clt_path, false);
2048 struct rtrs_clt_path *clt_path = to_clt_path(s);
2053 clt_path->s.dst_addr.ss_family == AF_IB ?
2069 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
2070 (struct sockaddr *)&clt_path->s.dst_addr,
2082 clt_path->state_wq,
2083 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2093 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
2100 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
2102 struct rtrs_clt_sess *clt = clt_path->clt;
2126 clt_path->established = true;
2127 clt_path->reconnect_attempts = 0;
2128 clt_path->stats->reconnects.successful_cnt++;
2131 static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
2133 struct rtrs_clt_sess *clt = clt_path->clt;
2135 if (!clt_path->established)
2138 clt_path->established = false;
2146 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
2151 WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
2157 mutex_lock(&clt_path->init_mutex);
2158 mutex_unlock(&clt_path->init_mutex);
2166 rtrs_stop_hb(&clt_path->s);
2175 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2176 if (!clt_path->s.con[cid])
2178 con = to_clt_con(clt_path->s.con[cid]);
2181 fail_all_outstanding_reqs(clt_path);
2182 free_path_reqs(clt_path);
2183 rtrs_clt_path_down(clt_path);
2193 wait_event_timeout(clt_path->state_wq,
2194 !atomic_read(&clt_path->connected_cnt),
2197 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2198 if (!clt_path->s.con[cid])
2200 con = to_clt_con(clt_path->s.con[cid]);
2209 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
2211 struct rtrs_clt_sess *clt = clt_path->clt;
2217 list_del_rcu(&clt_path->s.entry);
2258 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
2270 lockdep_is_held(&clt->paths_mutex)) != clt_path)
2283 if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
2299 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
2301 struct rtrs_clt_sess *clt = clt_path->clt;
2306 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2312 struct rtrs_clt_path *clt_path;
2314 clt_path = container_of(work, struct rtrs_clt_path, close_work);
2316 cancel_work_sync(&clt_path->err_recovery_work);
2317 cancel_delayed_work_sync(&clt_path->reconnect_dwork);
2318 rtrs_clt_stop_and_destroy_conns(clt_path);
2319 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
2322 static int init_conns(struct rtrs_clt_path *clt_path)
2332 clt_path->s.recon_cnt++;
2335 for (cid = 0; cid < clt_path->s.con_num; cid++) {
2336 err = create_con(clt_path, cid);
2340 err = create_cm(to_clt_con(clt_path->s.con[cid]));
2344 err = alloc_path_reqs(clt_path);
2355 if (!clt_path->s.con[i])
2358 con = to_clt_con(clt_path->s.con[i]);
2373 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2381 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2385 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2388 rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
2390 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2397 static int process_info_rsp(struct rtrs_clt_path *clt_path,
2404 if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
2405 rtrs_err(clt_path->clt,
2415 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
2417 rtrs_err(clt_path->clt,
2419 MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
2423 for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
2434 if (!len || (len % clt_path->chunk_size)) {
2435 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
2440 for ( ; len && i < clt_path->queue_depth; i++) {
2441 clt_path->rbufs[i].addr = addr;
2442 clt_path->rbufs[i].rkey = rkey;
2444 len -= clt_path->chunk_size;
2445 addr += clt_path->chunk_size;
2449 if (sgi != sg_cnt || i != clt_path->queue_depth) {
2450 rtrs_err(clt_path->clt,
2454 if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
2455 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
2465 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2477 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
2484 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2488 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
2492 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
2499 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2503 err = process_info_rsp(clt_path, msg);
2507 err = post_recv_path(clt_path);
2515 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2516 rtrs_clt_change_state_get_old(clt_path, state, NULL);
2519 static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
2521 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
2528 rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
2531 clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
2533 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
2542 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2549 memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
2551 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
2558 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
2564 wait_event_interruptible_timeout(clt_path->state_wq,
2565 clt_path->state != RTRS_CLT_CONNECTING,
2568 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
2569 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
2577 rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
2579 rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
2582 rtrs_clt_change_state_get_old(clt_path,
2590 * @clt_path: client path.
2594 static int init_path(struct rtrs_clt_path *clt_path)
2599 .src = &clt_path->s.src_addr,
2600 .dst = &clt_path->s.dst_addr,
2605 mutex_lock(&clt_path->init_mutex);
2606 err = init_conns(clt_path);
2608 rtrs_err(clt_path->clt,
2610 str, clt_path->hca_name, clt_path->hca_port);
2613 err = rtrs_send_path_info(clt_path);
2615 rtrs_err(clt_path->clt,
2617 err, str, clt_path->hca_name, clt_path->hca_port);
2620 rtrs_clt_path_up(clt_path);
2621 rtrs_start_hb(&clt_path->s);
2623 mutex_unlock(&clt_path->init_mutex);
2630 struct rtrs_clt_path *clt_path;
2634 clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
2636 clt = clt_path->clt;
2638 trace_rtrs_clt_reconnect_work(clt_path);
2640 if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
2643 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
2645 rtrs_clt_close_conns(clt_path, false);
2648 clt_path->reconnect_attempts++;
2651 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
2652 err = init_path(clt_path);
2660 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
2661 clt_path->stats->reconnects.fail_cnt++;
2662 queue_work(rtrs_wq, &clt_path->err_recovery_work);
2793 struct rtrs_clt_path *clt_path, *tmp;
2812 struct rtrs_clt_path *clt_path;
2814 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
2816 if (IS_ERR(clt_path)) {
2817 err = PTR_ERR(clt_path);
2821 clt_path->for_new_clt = 1;
2822 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2824 err = init_path(clt_path);
2826 list_del_rcu(&clt_path->s.entry);
2827 rtrs_clt_close_conns(clt_path, true);
2828 free_percpu(clt_path->stats->pcpu_stats);
2829 kfree(clt_path->stats);
2830 free_path(clt_path);
2834 err = rtrs_clt_create_path_files(clt_path);
2836 list_del_rcu(&clt_path->s.entry);
2837 rtrs_clt_close_conns(clt_path, true);
2838 free_percpu(clt_path->stats->pcpu_stats);
2839 kfree(clt_path->stats);
2840 free_path(clt_path);
2851 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2852 rtrs_clt_destroy_path_files(clt_path, NULL);
2853 rtrs_clt_close_conns(clt_path, true);
2854 kobject_put(&clt_path->kobj);
2870 struct rtrs_clt_path *clt_path, *tmp;
2876 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2877 rtrs_clt_close_conns(clt_path, true);
2878 rtrs_clt_destroy_path_files(clt_path, NULL);
2879 kobject_put(&clt_path->kobj);
2886 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
2892 changed = rtrs_clt_change_state_get_old(clt_path,
2896 clt_path->reconnect_attempts = 0;
2897 rtrs_clt_stop_and_destroy_conns(clt_path);
2898 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
2906 flush_delayed_work(&clt_path->reconnect_dwork);
2907 err = (READ_ONCE(clt_path->state) ==
2914 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
2930 rtrs_clt_close_conns(clt_path, true);
2931 changed = rtrs_clt_change_state_get_old(clt_path,
2937 rtrs_clt_remove_path_from_arr(clt_path);
2938 rtrs_clt_destroy_path_files(clt_path, sysfs_self);
2939 kobject_put(&clt_path->kobj);
2985 struct rtrs_clt_path *clt_path;
3007 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3008 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3011 if (usr_len + hdr_len > clt_path->max_hdr_size) {
3012 rtrs_wrn_rl(clt_path->clt,
3015 usr_len, hdr_len, clt_path->max_hdr_size);
3019 req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
3045 struct rtrs_clt_path *clt_path;
3050 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3051 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3054 con = clt_path->s.con[index + 1];
3092 struct rtrs_clt_path *clt_path;
3095 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
3096 if (IS_ERR(clt_path))
3097 return PTR_ERR(clt_path);
3106 clt_path->for_new_clt = 1;
3116 rtrs_clt_add_path_to_arr(clt_path);
3118 err = init_path(clt_path);
3122 err = rtrs_clt_create_path_files(clt_path);
3129 rtrs_clt_remove_path_from_arr(clt_path);
3130 rtrs_clt_close_conns(clt_path, true);
3131 free_percpu(clt_path->stats->pcpu_stats);
3132 kfree(clt_path->stats);
3133 free_path(clt_path);
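
The matches around source lines 820-835 and 871-886 above are the multipath selection policies: an RCU walk over clt->paths_list that skips any path whose state is not RTRS_CLT_CONNECTED or that the current CPU has marked via mp_skip_entry, then picks the path with either the fewest in-flight requests or the lowest heartbeat latency. The sketch below is a minimal, userspace-only approximation of the min-inflight variant; fake_path, min_inflight_path, skip_on_cpu and the PATH_* states are illustrative stand-ins rather than rtrs definitions, and the RCU list walk plus atomic counter reads are replaced by a plain array scan.

/*
 * Simplified, userspace-only sketch (not kernel code) of the
 * "min-inflight" path selection shown in the listing above:
 * skip paths that are not connected, skip paths the current CPU
 * was told to avoid, and pick the path with the fewest in-flight
 * requests.  All names here are hypothetical stand-ins.
 */
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum path_state { PATH_CONNECTED, PATH_RECONNECTING, PATH_CLOSED };

struct fake_path {
	const char *name;
	enum path_state state;
	int inflight;      /* stands in for atomic_read(&stats->inflight) */
	bool skip_on_cpu;  /* stands in for the per-CPU mp_skip_entry check */
};

static struct fake_path *min_inflight_path(struct fake_path *paths, size_t n)
{
	struct fake_path *best = NULL;
	int best_inflight = INT_MAX;

	for (size_t i = 0; i < n; i++) {
		struct fake_path *p = &paths[i];

		if (p->state != PATH_CONNECTED || p->skip_on_cpu)
			continue;
		if (p->inflight < best_inflight) {
			best_inflight = p->inflight;
			best = p;
		}
	}
	return best;
}

int main(void)
{
	struct fake_path paths[] = {
		{ "path0", PATH_CONNECTED,    7, false },
		{ "path1", PATH_RECONNECTING, 1, false },
		{ "path2", PATH_CONNECTED,    3, false },
	};
	struct fake_path *p = min_inflight_path(paths, 3);

	printf("selected: %s\n", p ? p->name : "(none)");
	return 0;
}

In the real client the walk stays lockless under RCU and the inflight counter is read atomically, so path selection on the I/O hot path never has to take the paths_mutex that the add/remove paths in the listing use.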