Lines matching refs:con: references to the connection object (con) in the RTRS client source. Each entry below gives the source line number followed by the matching line.

185 	return to_clt_con(sess->s.con[id]);
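Line 185 above indexes the session's connection array and converts the generic connection back to the client-private one with to_clt_con(). Below is a minimal userspace sketch of that wrapper pattern, assuming simplified stand-in types (base_con, clt_con) and a locally defined container_of(); the real rtrs types and the surrounding helper are not reproduced here.

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the generic/client connection pair seen in the
 * listing (rtrs_con embedded in rtrs_clt_con): the private struct wraps the
 * generic one, and container_of() recovers the wrapper from the inner pointer.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_con {		/* models struct rtrs_con      */
	unsigned int cid;	/* connection id               */
};

struct clt_con {		/* models struct rtrs_clt_con  */
	struct base_con c;	/* generic part, as in con->c  */
	int io_cnt;		/* client-private state        */
};

static struct clt_con *to_clt_con(struct base_con *c)
{
	return container_of(c, struct clt_con, c);
}

int main(void)
{
	struct clt_con cons[3] = { { { 0 }, 0 }, { { 1 }, 0 }, { { 2 }, 0 } };
	struct base_con *s_con[3];	/* models the sess->s.con[] array */
	unsigned int id;

	for (id = 0; id < 3; id++)
		s_con[id] = &cons[id].c;

	/* models "return to_clt_con(sess->s.con[id]);" from line 185 */
	for (id = 0; id < 3; id++)
		printf("id %u -> clt_con with cid %u\n",
		       id, to_clt_con(s_con[id])->c.cid);
	return 0;
}
```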
303 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
305 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
334 struct rtrs_clt_con *con = cq->cq_context;
337 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
339 rtrs_rdma_error_recovery(con);
354 struct rtrs_clt_con *con = cq->cq_context;
357 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
359 rtrs_rdma_error_recovery(con);
371 struct rtrs_clt_con *con = req->con;
380 return ib_post_send(con->c.qp, &wr, NULL);
386 struct rtrs_clt_con *con = req->con;
392 if (WARN_ON(!req->con))
394 sess = to_clt_sess(con->c.sess);
423 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
444 req->con = NULL;
450 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
455 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
460 rtrs_wrn(con->c.sess,
474 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
480 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
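Lines 450-480 above are from rtrs_post_send_rdma(); line 474 (and line 973 further down) derives the send flags from a per-connection counter taken modulo the session's queue depth. The listing truncates the ternary, but the visible shape suggests that only every queue_depth-th post requests a signaled completion, presumably so completions stay cheap while the send queue can still be reclaimed periodically. A minimal sketch of that batching rule, assuming a plain counter in place of the kernel atomic and a hypothetical SIGNALED flag value:

```c
#include <stdio.h>

/* Hypothetical stand-in for a "signaled completion" send flag; the real
 * flag lives in the RDMA verbs headers and its value is irrelevant here. */
#define SIGNALED 0x1u

/*
 * Models the shape of line 474,
 * "atomic_inc_return(&con->io_cnt) % sess->queue_depth ? ... : ...":
 * request a signaled completion only when the incremented counter
 * reaches a multiple of queue_depth.
 */
static unsigned int send_flags(unsigned int *io_cnt, unsigned int queue_depth)
{
	return ++(*io_cnt) % queue_depth ? 0 : SIGNALED;
}

int main(void)
{
	unsigned int io_cnt = 0, i;
	const unsigned int queue_depth = 4;

	for (i = 1; i <= 10; i++)
		printf("post %2u: flags=%s\n", i,
		       send_flags(&io_cnt, queue_depth) ? "SIGNALED" : "0");
	return 0;
}
```

With queue_depth set to 4, the model marks only every fourth post as signaled.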
499 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
503 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
508 err = rtrs_iu_post_recv(&con->c, iu);
510 rtrs_err(con->c.sess, "post iu failed %d\n", err);
511 rtrs_rdma_error_recovery(con);
515 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
517 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
530 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
561 return rtrs_clt_recv_done(con, wc);
563 rtrs_rdma_error_recovery(con);
576 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
590 return ib_post_recv(con->qp, wr, NULL);
595 struct rtrs_clt_con *con = cq->cq_context;
596 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
605 rtrs_rdma_error_recovery(con);
609 rtrs_clt_update_wc_stats(con);
630 WARN_ON(con->c.cid);
633 return rtrs_clt_recv_done(con, wc);
635 WARN_ON(con->c.cid);
638 return rtrs_clt_recv_done(con, wc);
640 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
648 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
650 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
652 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
654 rtrs_rdma_error_recovery(con);
667 return rtrs_clt_recv_done(con, wc);
669 return rtrs_clt_rkey_rsp_done(con, wc);
684 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
687 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
691 struct rtrs_iu *iu = &con->rsp_ius[i];
693 err = rtrs_iu_post_recv(&con->c, iu);
695 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
721 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
899 req->con = rtrs_permit_to_clt_con(sess, permit);
946 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
951 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
973 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
979 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
986 struct rtrs_clt_con *con = req->con;
987 struct rtrs_sess *s = con->c.sess;
1028 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
1060 struct rtrs_clt_con *con = req->con;
1061 struct rtrs_sess *s = con->c.sess;
1150 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
1389 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1391 rtrs_rdma_error_recovery(con);
1432 sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
1433 if (!sess->s.con)
1482 kfree(sess->s.con);
1493 kfree(sess->s.con);
1500 struct rtrs_clt_con *con;
1502 con = kzalloc(sizeof(*con), GFP_KERNEL);
1503 if (!con)
1507 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1508 con->c.cid = cid;
1509 con->c.sess = &sess->s;
1510 atomic_set(&con->io_cnt, 0);
1512 sess->s.con[cid] = &con->c;
1517 static void destroy_con(struct rtrs_clt_con *con)
1519 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1521 sess->s.con[con->c.cid] = NULL;
1522 kfree(con);
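Lines 1432-1522 cover the per-connection bookkeeping: the session allocates one slot per connection id (line 1432), create_con() fills in the cid and a CPU hint and links the new connection into that array (lines 1500-1512), and destroy_con() clears the slot before freeing (lines 1517-1522). A minimal userspace model of that lifecycle, assuming hypothetical names and a fixed NR_CPUS in place of nr_cpu_ids:

```c
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8	/* stand-in for nr_cpu_ids */

struct con {
	unsigned int cid;	/* connection id, 0 being the first connection */
	unsigned int cpu;	/* CPU hint derived from the cid               */
};

struct sess {
	unsigned int con_num;
	struct con **con;	/* models sess->s.con[], one slot per cid */
};

/*
 * Models create_con() at lines 1500-1512: allocate, derive the CPU from the
 * cid (with "(cid ? cid - 1 : 0) % NR_CPUS", cid 0 and cid 1 share CPU 0),
 * then link the connection into the session's per-cid array.
 */
static int create_con(struct sess *sess, unsigned int cid)
{
	struct con *con = calloc(1, sizeof(*con));

	if (!con)
		return -1;
	con->cid = cid;
	con->cpu = (cid ? cid - 1 : 0) % NR_CPUS;
	sess->con[cid] = con;
	return 0;
}

/* Models destroy_con() at lines 1517-1522: clear the slot, then free. */
static void destroy_con(struct sess *sess, struct con *con)
{
	sess->con[con->cid] = NULL;
	free(con);
}

int main(void)
{
	struct sess sess = { .con_num = 5 };
	unsigned int cid;

	/* Models the allocation of sess->s.con at line 1432. */
	sess.con = calloc(sess.con_num, sizeof(*sess.con));
	if (!sess.con)
		return 1;

	for (cid = 0; cid < sess.con_num; cid++)
		if (!create_con(&sess, cid))
			printf("cid %u -> cpu %u\n", cid, sess.con[cid]->cpu);

	for (cid = 0; cid < sess.con_num; cid++)
		if (sess.con[cid])
			destroy_con(&sess, sess.con[cid]);
	free(sess.con);
	return 0;
}
```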
1525 static int create_con_cq_qp(struct rtrs_clt_con *con)
1527 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1541 if (con->c.cid == 0) {
1559 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1590 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1591 con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
1595 if (!con->rsp_ius)
1597 con->queue_size = max_recv_wr;
1600 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
1601 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
1611 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1613 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1620 rtrs_cq_qp_destroy(&con->c);
1621 if (con->rsp_ius) {
1622 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
1623 con->rsp_ius = NULL;
1624 con->queue_size = 0;
1632 static void stop_cm(struct rtrs_clt_con *con)
1634 rdma_disconnect(con->c.cm_id);
1635 if (con->c.qp)
1636 ib_drain_qp(con->c.qp);
1639 static void destroy_cm(struct rtrs_clt_con *con)
1641 rdma_destroy_id(con->c.cm_id);
1642 con->c.cm_id = NULL;
1645 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1647 struct rtrs_sess *s = con->c.sess;
1650 err = create_con_cq_qp(con);
1655 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1662 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1664 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1681 .cid = cpu_to_le16(con->c.cid),
1689 err = rdma_connect_locked(con->c.cm_id, &param);
1696 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1699 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1728 if (con->c.cid == 0) {
1773 sess->hca_port = con->c.cm_id->port_num;
1776 sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
1784 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1786 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1789 con->cm_err = 1;
1792 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1795 struct rtrs_sess *s = con->c.sess;
1802 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1803 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1831 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1833 if (con->cm_err == 1) {
1836 sess = to_clt_sess(con->c.sess);
1841 con->cm_err = cm_err;
1847 struct rtrs_clt_con *con = cm_id->context;
1848 struct rtrs_sess *s = con->c.sess;
1854 cm_err = rtrs_rdma_addr_resolved(con);
1857 cm_err = rtrs_rdma_route_resolved(con);
1860 cm_err = rtrs_rdma_conn_established(con, ev);
1866 flag_success_on_conn(con);
1872 cm_err = rtrs_rdma_conn_rejected(con, ev);
1905 flag_error_on_conn(con, cm_err);
1906 rtrs_rdma_error_recovery(con);
1912 static int create_cm(struct rtrs_clt_con *con)
1914 struct rtrs_sess *s = con->c.sess;
1919 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
1928 con->c.cm_id = cm_id;
1929 con->cm_err = 0;
1950 con->cm_err || sess->state != RTRS_CLT_CONNECTING,
1958 if (con->cm_err < 0) {
1959 err = con->cm_err;
1971 stop_cm(con);
1973 destroy_con_cq_qp(con);
1975 destroy_cm(con);
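Lines 1645-1975 suggest the per-connection RDMA CM flow: on ADDR_RESOLVED the CQ/QP is created and the route resolved, on ROUTE_RESOLVED rdma_connect_locked() is issued, an established connection is flagged with cm_err = 1 (line 1789), a rejection or error records a negative cm_err and kicks error recovery, and create_cm() waits for cm_err or a session state change (line 1950), apparently unwinding through stop_cm(), destroy_con_cq_qp() and destroy_cm() on failure. A minimal single-threaded model of that dispatch, with hypothetical event names standing in for the rdma_cm event codes:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the rdma_cm events seen in the handler. */
enum cm_event { ADDR_RESOLVED, ROUTE_RESOLVED, ESTABLISHED, REJECTED, ERROR };

struct con {
	int cm_err;	/* 0 = pending, 1 = connected, < 0 = failed */
};

/*
 * Models the dispatch in the CM handler at lines 1847-1906: each event either
 * advances connection setup or records an error on the connection, and the
 * caller later inspects con->cm_err (as create_cm() does around line 1950).
 */
static void cm_handler(struct con *con, enum cm_event ev)
{
	switch (ev) {
	case ADDR_RESOLVED:
		/* real code: create_con_cq_qp() then rdma_resolve_route() */
		printf("addr resolved: create CQ/QP, resolve route\n");
		break;
	case ROUTE_RESOLVED:
		/* real code: rdma_connect_locked() with the connect payload */
		printf("route resolved: send connect request\n");
		break;
	case ESTABLISHED:
		/* models flag_success_on_conn(): cm_err = 1 means success */
		con->cm_err = 1;
		printf("established\n");
		break;
	case REJECTED:
	case ERROR:
	default:
		/* models flag_error_on_conn() plus error recovery */
		con->cm_err = -1;
		printf("failed: start error recovery\n");
		break;
	}
}

int main(void)
{
	struct con con = { 0 };
	enum cm_event good[] = { ADDR_RESOLVED, ROUTE_RESOLVED, ESTABLISHED };
	unsigned int i;

	for (i = 0; i < sizeof(good) / sizeof(good[0]); i++)
		cm_handler(&con, good[i]);
	printf("cm_err=%d (1 means the connection came up)\n", con.cm_err);
	return 0;
}
```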
2028 struct rtrs_clt_con *con;
2056 if (!sess->s.con[cid])
2058 con = to_clt_con(sess->s.con[cid]);
2059 stop_cm(con);
2077 if (!sess->s.con[cid])
2079 con = to_clt_con(sess->s.con[cid]);
2080 destroy_con_cq_qp(con);
2081 destroy_cm(con);
2082 destroy_con(con);
2228 err = create_cm(to_clt_con(sess->s.con[cid]));
2230 destroy_con(to_clt_con(sess->s.con[cid]));
2244 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
2246 stop_cm(con);
2247 destroy_con_cq_qp(con);
2248 destroy_cm(con);
2249 destroy_con(con);
2263 struct rtrs_clt_con *con = cq->cq_context;
2264 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2277 rtrs_clt_update_wc_stats(con);
2346 struct rtrs_clt_con *con = cq->cq_context;
2347 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2356 WARN_ON(con->c.cid);
2396 rtrs_clt_update_wc_stats(con);
2403 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);