Lines Matching refs:sess

49 struct rtrs_clt_sess *sess;
53 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
54 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
170 * @sess: client session pointer
177 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
183 id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
185 return to_clt_con(sess->s.con[id]);
192 * @sess: client session to change the state of.
200 static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
206 lockdep_assert_held(&sess->state_wq.lock);
208 old_state = sess->state;
282 sess->state = new_state;
283 wake_up_locked(&sess->state_wq);
289 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
295 spin_lock_irq(&sess->state_wq.lock);
296 if (sess->state == old_state)
297 changed = __rtrs_clt_change_state(sess, new_state);
298 spin_unlock_irq(&sess->state_wq.lock);
305 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
307 if (rtrs_clt_change_state_from_to(sess,
310 struct rtrs_clt *clt = sess->clt;
317 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
326 rtrs_clt_change_state_from_to(sess,
337 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
357 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
387 struct rtrs_clt_sess *sess;
394 sess = to_clt_sess(con->c.sess);
423 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
437 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
440 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
441 atomic_dec(&sess->stats->inflight);
455 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
460 rtrs_wrn(con->c.sess,
468 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
474 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
477 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
485 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
490 if (WARN_ON(msg_id >= sess->queue_depth))
493 req = &sess->reqs[msg_id];
503 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
505 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
510 rtrs_err(con->c.sess, "post iu failed %d\n", err);
517 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
525 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
530 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
534 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
538 rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
543 if (WARN_ON(buf_id >= sess->queue_depth))
556 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
557 process_io_rsp(sess, msg_id, err, w_inval);
559 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
596 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
603 rtrs_err(sess->clt, "RDMA failed: %s\n",
628 process_io_rsp(sess, msg_id, err, w_inval);
631 rtrs_send_hb_ack(&sess->s);
632 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
636 sess->s.hb_missed_cnt = 0;
637 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
640 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
652 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
665 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
679 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
687 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
690 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
704 static int post_recv_sess(struct rtrs_clt_sess *sess)
709 for (cid = 0; cid < sess->s.con_num; cid++) {
713 q_size = sess->queue_depth;
721 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
723 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
809 struct rtrs_clt_sess *sess;
813 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
814 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
817 if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
820 inflight = atomic_read(&sess->stats->inflight);
824 min_path = sess;
869 * @sess: client session
881 struct rtrs_clt_sess *sess,
899 req->con = rtrs_permit_to_clt_con(sess, permit);
913 rtrs_clt_get_req(struct rtrs_clt_sess *sess,
922 req = &sess->reqs[permit->mem_id];
923 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
951 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
961 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
965 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
973 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
976 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
987 struct rtrs_sess *s = con->c.sess;
988 struct rtrs_clt_sess *sess = to_clt_sess(s);
997 if (unlikely(tsize > sess->chunk_size)) {
999 tsize, sess->chunk_size);
1003 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
1020 rbuf = &sess->rbufs[buf_id];
1033 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1034 atomic_dec(&sess->stats->inflight);
1036 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
1061 struct rtrs_sess *s = con->c.sess;
1062 struct rtrs_clt_sess *sess = to_clt_sess(s);
1074 s = &sess->s;
1075 dev = sess->s.dev;
1077 if (unlikely(tsize > sess->chunk_size)) {
1080 tsize, sess->chunk_size);
1150 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
1154 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1155 atomic_dec(&sess->stats->inflight);
1204 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
1206 struct rtrs_clt *clt = sess->clt;
1210 if (!sess->reqs)
1212 for (i = 0; i < sess->queue_depth; ++i) {
1213 req = &sess->reqs[i];
1231 static void free_sess_reqs(struct rtrs_clt_sess *sess)
1236 if (!sess->reqs)
1238 for (i = 0; i < sess->queue_depth; ++i) {
1239 req = &sess->reqs[i];
1243 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
1245 kfree(sess->reqs);
1246 sess->reqs = NULL;
1249 static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
1252 struct rtrs_clt *clt = sess->clt;
1255 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
1257 if (!sess->reqs)
1260 for (i = 0; i < sess->queue_depth; ++i) {
1261 req = &sess->reqs[i];
1262 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
1263 sess->s.dev->ib_dev,
1274 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
1275 sess->max_pages_per_mr);
1279 pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
1280 sess->max_pages_per_mr);
1290 free_sess_reqs(sess);
1343 static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
1349 ib_dev = sess->s.dev->ib_dev;
1359 sess->max_pages_per_mr =
1360 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
1362 sess->max_send_sge = ib_dev->attrs.max_send_sge;
1365 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
1371 spin_lock_irq(&sess->state_wq.lock);
1372 *old_state = sess->state;
1373 changed = __rtrs_clt_change_state(sess, new_state);
1374 spin_unlock_irq(&sess->state_wq.lock);
1379 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
1384 return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
1394 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
1396 rtrs_init_hb(&sess->s, &io_comp_cqe,
1403 static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
1405 rtrs_start_hb(&sess->s);
1408 static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
1410 rtrs_stop_hb(&sess->s);
1421 struct rtrs_clt_sess *sess;
1425 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
1426 if (!sess)
1432 sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
1433 if (!sess->s.con)
1436 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
1437 if (!sess->stats)
1440 mutex_init(&sess->init_mutex);
1441 uuid_gen(&sess->s.uuid);
1442 memcpy(&sess->s.dst_addr, path->dst,
1448 * the sess->src_addr will contain only zeros, which is then fine.
1451 memcpy(&sess->s.src_addr, path->src,
1453 strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
1454 sess->s.con_num = con_num;
1455 sess->clt = clt;
1456 sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
1457 init_waitqueue_head(&sess->state_wq);
1458 sess->state = RTRS_CLT_CONNECTING;
1459 atomic_set(&sess->connected_cnt, 0);
1460 INIT_WORK(&sess->close_work, rtrs_clt_close_work);
1461 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
1462 rtrs_clt_init_hb(sess);
1464 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
1465 if (!sess->mp_skip_entry)
1469 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
1471 err = rtrs_clt_init_stats(sess->stats);
1475 return sess;
1478 free_percpu(sess->mp_skip_entry);
1480 kfree(sess->stats);
1482 kfree(sess->s.con);
1484 kfree(sess);
1489 void free_sess(struct rtrs_clt_sess *sess)
1491 free_percpu(sess->mp_skip_entry);
1492 mutex_destroy(&sess->init_mutex);
1493 kfree(sess->s.con);
1494 kfree(sess->rbufs);
1495 kfree(sess);
1498 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
1509 con->c.sess = &sess->s;
1512 sess->s.con[cid] = &con->c;
1519 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1521 sess->s.con[con->c.cid] = NULL;
1527 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1551 if (WARN_ON(sess->s.dev))
1559 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1561 if (!sess->s.dev) {
1562 rtrs_wrn(sess->clt,
1566 sess->s.dev_ref = 1;
1567 query_fast_reg_mode(sess);
1574 if (WARN_ON(!sess->s.dev))
1576 if (WARN_ON(!sess->queue_depth))
1580 sess->s.dev_ref++;
1582 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1584 sess->queue_depth * 3 + 1);
1586 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1587 sess->queue_depth * 3 + 1);
1590 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1592 GFP_KERNEL, sess->s.dev->ib_dev,
1600 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
1601 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
1613 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1622 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
1626 if (sess->s.dev_ref && !--sess->s.dev_ref) {
1627 rtrs_ib_dev_put(sess->s.dev);
1628 sess->s.dev = NULL;
1647 struct rtrs_sess *s = con->c.sess;
1664 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1665 struct rtrs_clt *clt = sess->clt;
1682 .cid_num = cpu_to_le16(sess->s.con_num),
1683 .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
1685 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
1686 uuid_copy(&msg.sess_uuid, &sess->s.uuid);
1699 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1700 struct rtrs_clt *clt = sess->clt;
1731 if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
1737 sess->reconnect_attempts = -1;
1743 if (!sess->rbufs) {
1744 kfree(sess->rbufs);
1745 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
1747 if (!sess->rbufs)
1750 sess->queue_depth = queue_depth;
1751 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1752 sess->max_io_size = le32_to_cpu(msg->max_io_size);
1753 sess->flags = le32_to_cpu(msg->flags);
1754 sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
1765 clt->queue_depth = sess->queue_depth;
1766 clt->max_io_size = min_not_zero(sess->max_io_size,
1773 sess->hca_port = con->c.cm_id->port_num;
1774 scnprintf(sess->hca_name, sizeof(sess->hca_name),
1775 sess->s.dev->ib_dev->name);
1776 sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
1778 sess->for_new_clt = 1;
1786 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1788 atomic_inc(&sess->connected_cnt);
1795 struct rtrs_sess *s = con->c.sess;
1823 static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
1825 if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
1826 queue_work(rtrs_wq, &sess->close_work);
1828 flush_work(&sess->close_work);
1834 struct rtrs_clt_sess *sess;
1836 sess = to_clt_sess(con->c.sess);
1837 if (atomic_dec_and_test(&sess->connected_cnt))
1839 wake_up(&sess->state_wq);
1848 struct rtrs_sess *s = con->c.sess;
1849 struct rtrs_clt_sess *sess = to_clt_sess(s);
1867 wake_up(&sess->state_wq);
1892 rtrs_clt_close_conns(sess, false);
1914 struct rtrs_sess *s = con->c.sess;
1915 struct rtrs_clt_sess *sess = to_clt_sess(s);
1920 sess->s.dst_addr.ss_family == AF_IB ?
1936 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
1937 (struct sockaddr *)&sess->s.dst_addr,
1949 sess->state_wq,
1950 con->cm_err || sess->state != RTRS_CLT_CONNECTING,
1962 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
1980 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
1982 struct rtrs_clt *clt = sess->clt;
2006 sess->established = true;
2007 sess->reconnect_attempts = 0;
2008 sess->stats->reconnects.successful_cnt++;
2011 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
2013 struct rtrs_clt *clt = sess->clt;
2015 if (!sess->established)
2018 sess->established = false;
2026 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
2031 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
2037 mutex_lock(&sess->init_mutex);
2038 mutex_unlock(&sess->init_mutex);
2046 rtrs_clt_stop_hb(sess);
2055 for (cid = 0; cid < sess->s.con_num; cid++) {
2056 if (!sess->s.con[cid])
2058 con = to_clt_con(sess->s.con[cid]);
2061 fail_all_outstanding_reqs(sess);
2062 free_sess_reqs(sess);
2063 rtrs_clt_sess_down(sess);
2073 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
2076 for (cid = 0; cid < sess->s.con_num; cid++) {
2077 if (!sess->s.con[cid])
2079 con = to_clt_con(sess->s.con[cid]);
2087 struct rtrs_clt_sess *sess,
2094 return sess == cmpxchg(ppcpu_path, sess, next);
2097 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
2099 struct rtrs_clt *clt = sess->clt;
2105 list_del_rcu(&sess->s.entry);
2111 * At this point nobody sees @sess in the list, but still we have
2112 * dangling pointer @pcpu_path which _can_ point to @sess. Since
2113 * nobody can observe @sess in the list, we guarantee that IO path
2114 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2115 * to @sess, but can never again become @sess.
2142 * Get @next connection from current @sess which is going to be
2143 * removed. If @sess is the last element, then @next is NULL.
2146 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
2159 lockdep_is_held(&clt->paths_mutex)) != sess)
2172 if (xchg_sessions(ppcpu_path, sess, next))
2176 * @sess and dereferencing it right now, so wait for
2187 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
2190 struct rtrs_clt *clt = sess->clt;
2195 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2201 struct rtrs_clt_sess *sess;
2203 sess = container_of(work, struct rtrs_clt_sess, close_work);
2205 cancel_delayed_work_sync(&sess->reconnect_dwork);
2206 rtrs_clt_stop_and_destroy_conns(sess);
2207 rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
2210 static int init_conns(struct rtrs_clt_sess *sess)
2220 sess->s.recon_cnt++;
2223 for (cid = 0; cid < sess->s.con_num; cid++) {
2224 err = create_con(sess, cid);
2228 err = create_cm(to_clt_con(sess->s.con[cid]));
2230 destroy_con(to_clt_con(sess->s.con[cid]));
2234 err = alloc_sess_reqs(sess);
2238 rtrs_clt_start_hb(sess);
2244 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
2256 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2264 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2268 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2271 rtrs_err(sess->clt, "Sess info request send failed: %s\n",
2273 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2280 static int process_info_rsp(struct rtrs_clt_sess *sess,
2294 (ilog2(sess->chunk_size - 1) + 1) >
2296 rtrs_err(sess->clt,
2298 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
2301 if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
2302 rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
2307 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
2318 if (unlikely(!len || (len % sess->chunk_size))) {
2319 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
2323 for ( ; len && i < sess->queue_depth; i++) {
2324 sess->rbufs[i].addr = addr;
2325 sess->rbufs[i].rkey = rkey;
2327 len -= sess->chunk_size;
2328 addr += sess->chunk_size;
2332 if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
2333 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
2336 if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
2337 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
2347 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2359 rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
2366 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2370 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
2374 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
2381 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2385 err = process_info_rsp(sess, msg);
2389 err = post_recv_sess(sess);
2397 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2398 rtrs_clt_change_state(sess, state);
2401 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
2403 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
2413 sess->s.dev->ib_dev, DMA_TO_DEVICE,
2415 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
2424 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2431 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
2433 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
2439 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
2445 wait_event_interruptible_timeout(sess->state_wq,
2446 sess->state != RTRS_CLT_CONNECTING,
2449 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
2450 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
2459 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
2461 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
2464 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2471 * @sess: client session.
2475 static int init_sess(struct rtrs_clt_sess *sess)
2479 mutex_lock(&sess->init_mutex);
2480 err = init_conns(sess);
2482 rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
2485 err = rtrs_send_sess_info(sess);
2487 rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
2490 rtrs_clt_sess_up(sess);
2492 mutex_unlock(&sess->init_mutex);
2499 struct rtrs_clt_sess *sess;
2504 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
2506 clt = sess->clt;
2508 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
2511 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
2513 rtrs_clt_close_conns(sess, false);
2516 sess->reconnect_attempts++;
2519 rtrs_clt_stop_and_destroy_conns(sess);
2521 if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
2522 err = init_sess(sess);
2530 if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
2531 sess->stats->reconnects.fail_cnt++;
2533 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
2672 struct rtrs_clt_sess *sess, *tmp;
2685 struct rtrs_clt_sess *sess;
2687 sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
2689 if (IS_ERR(sess)) {
2690 err = PTR_ERR(sess);
2694 sess->for_new_clt = 1;
2695 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2697 err = init_sess(sess);
2699 list_del_rcu(&sess->s.entry);
2700 rtrs_clt_close_conns(sess, true);
2701 free_percpu(sess->stats->pcpu_stats);
2702 kfree(sess->stats);
2703 free_sess(sess);
2707 err = rtrs_clt_create_sess_files(sess);
2709 list_del_rcu(&sess->s.entry);
2710 rtrs_clt_close_conns(sess, true);
2711 free_percpu(sess->stats->pcpu_stats);
2712 kfree(sess->stats);
2713 free_sess(sess);
2724 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2725 rtrs_clt_destroy_sess_files(sess, NULL);
2726 rtrs_clt_close_conns(sess, true);
2727 kobject_put(&sess->kobj);
2744 struct rtrs_clt_sess *sess, *tmp;
2751 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2752 rtrs_clt_close_conns(sess, true);
2753 rtrs_clt_destroy_sess_files(sess, NULL);
2754 kobject_put(&sess->kobj);
2761 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
2767 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
2770 sess->reconnect_attempts = 0;
2771 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
2779 flush_delayed_work(&sess->reconnect_dwork);
2780 err = (READ_ONCE(sess->state) ==
2787 int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
2789 rtrs_clt_close_conns(sess, true);
2794 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
2810 rtrs_clt_close_conns(sess, true);
2811 changed = rtrs_clt_change_state_get_old(sess,
2817 rtrs_clt_remove_path_from_arr(sess);
2818 rtrs_clt_destroy_sess_files(sess, sysfs_self);
2819 kobject_put(&sess->kobj);
2865 struct rtrs_clt_sess *sess;
2887 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
2888 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
2891 if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
2892 rtrs_wrn_rl(sess->clt,
2895 usr_len, hdr_len, sess->max_hdr_size);
2899 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
2945 struct rtrs_clt_sess *sess;
2948 sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
2950 if (IS_ERR(sess))
2951 return PTR_ERR(sess);
2958 rtrs_clt_add_path_to_arr(sess, addr);
2960 err = init_sess(sess);
2964 err = rtrs_clt_create_sess_files(sess);
2971 rtrs_clt_remove_path_from_arr(sess);
2972 rtrs_clt_close_conns(sess, true);
2973 free_percpu(sess->stats->pcpu_stats);
2974 kfree(sess->stats);
2975 free_sess(sess);
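
The matches around lines 809-824, 1033-1034 and 1154-1155 show the client's MP_POLICY_MIN_INFLIGHT multipath policy: every connected path keeps an inflight counter in its stats, the path selector skips paths that are not CONNECTED and picks the one with the lowest counter, and the counter is decremented again when a request completes or fails. What follows is a minimal, self-contained userspace sketch of that selection rule only; the struct path type, the pick_min_inflight() helper and the PATH_* states are illustrative names made up for this example and are not identifiers from rtrs-clt.c.

#include <limits.h>
#include <stdio.h>

enum path_state { PATH_DISCONNECTED, PATH_CONNECTED };

struct path {
	enum path_state state;
	int inflight;            /* an atomic_t inside per-path stats in the kernel */
	const char *name;
};

/* Return the connected path with the fewest in-flight requests, or NULL. */
static struct path *pick_min_inflight(struct path *paths, int n)
{
	struct path *best = NULL;
	int min = INT_MAX;

	for (int i = 0; i < n; i++) {
		if (paths[i].state != PATH_CONNECTED)
			continue;
		if (paths[i].inflight < min) {
			min = paths[i].inflight;
			best = &paths[i];
		}
	}
	return best;
}

int main(void)
{
	struct path paths[] = {
		{ PATH_CONNECTED,    3, "path0" },
		{ PATH_CONNECTED,    1, "path1" },
		{ PATH_DISCONNECTED, 0, "path2" },
	};
	struct path *p = pick_min_inflight(paths, 3);

	if (p)
		printf("chose %s (inflight=%d)\n", p->name, p->inflight);
	return 0;
}

In the kernel the loop additionally honours the per-CPU mp_skip_entry list (line 817) and walks the path list under RCU; both details are omitted here for brevity.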