Lines matching references to 'con'
188 return to_clt_con(clt_path->s.con[id]);
307 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
334 rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
336 rtrs_rdma_error_recovery(con);
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
354 rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
356 rtrs_rdma_error_recovery(con);
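
The two handlers at lines 331-356 repeat one pattern: the connection is recovered from the work completion's QP context, and any failed status escalates to path-wide error recovery. A minimal sketch of that shape, assuming the driver's internal types and helpers (rtrs_clt_con, to_clt_con(), rtrs_err(), rtrs_rdma_error_recovery()) plus <rdma/ib_verbs.h>; the function name here is hypothetical:

static void example_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);

	if (wc->status != IB_WC_SUCCESS) {
		/* Log the verbs status string, then tear the path down */
		rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
			 ib_wc_status_msg(wc->status));
		rtrs_rdma_error_recovery(con);
	}
}
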
368 struct rtrs_clt_con *con = req->con;
377 return ib_post_send(con->c.qp, &wr, NULL);
383 struct rtrs_clt_con *con = req->con;
389 if (WARN_ON(!req->con))
391 clt_path = to_clt_path(con->c.path);
421 rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
446 req->con = NULL;
449 rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
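
Lines 368-377 build a local-invalidate send WR on the request's connection. A plausible condensed form; the req->mr->rkey field and the omission of the wr_cqe wiring are assumptions, not the driver's exact code:

static int example_inv_rkey(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct ib_send_wr wr = {
		.opcode             = IB_WR_LOCAL_INV,
		.send_flags         = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,	/* assumed field */
	};

	return ib_post_send(con->c.qp, &wr, NULL);
}
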
458 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
463 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
468 rtrs_wrn(con->c.path,
482 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
489 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
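
Lines 482 and 1051 share one signaling idiom: a per-connection WR counter decides whether a given send is signaled, so only every signal_interval-th work request produces a completion and the send CQ stays shallow. The computation, extracted into a hypothetical helper:

static int example_signal_flags(struct rtrs_clt_con *con, u32 signal_interval)
{
	/* Signal one WR per interval to bound unsignaled work */
	return atomic_inc_return(&con->c.wr_cnt) % signal_interval ?
			0 : IB_SEND_SIGNALED;
}

Note that line 1625 seeds wr_cnt to 1 at connection setup, so the very first send is not forced to be signaled.
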
508 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
512 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
517 err = rtrs_iu_post_recv(&con->c, iu);
519 rtrs_err(con->c.path, "post iu failed %d\n", err);
520 rtrs_rdma_error_recovery(con);
524 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
526 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
539 rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
571 return rtrs_clt_recv_done(con, wc);
573 rtrs_rdma_error_recovery(con);
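
Lines 508-520 re-arm the just-consumed receive IU immediately and treat a failed re-post like any other transport fault. A condensed sketch, same assumptions as above:

static void example_rearm_recv(struct rtrs_clt_con *con, struct rtrs_iu *iu)
{
	int err;

	err = rtrs_iu_post_recv(&con->c, iu);
	if (err) {
		rtrs_err(con->c.path, "post iu failed %d\n", err);
		rtrs_rdma_error_recovery(con);
	}
}
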
586 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
600 return ib_post_recv(con->qp, wr, NULL);
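
Lines 586-600 post two chained, zero-SGE receive work requests that share a single CQE. A plausible body for the excerpted function, which pairs with the double ACK re-post visible at lines 660-666:

static int example_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
{
	struct ib_recv_wr wr_arr[2] = {};
	int i;

	for (i = 0; i < 2; i++) {
		wr_arr[i].wr_cqe = cqe;		/* both WRs complete into one CQE */
		if (i)
			wr_arr[i - 1].next = &wr_arr[i];	/* chain the pair */
	}
	return ib_post_recv(con->qp, wr_arr, NULL);
}
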
605 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
606 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
615 rtrs_rdma_error_recovery(con);
619 rtrs_clt_update_wc_stats(con);
640 WARN_ON(con->c.cid);
643 return rtrs_clt_recv_done(con, wc);
645 WARN_ON(con->c.cid);
650 return rtrs_clt_recv_done(con, wc);
652 rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
660 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
662 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
664 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
666 rtrs_rdma_error_recovery(con);
678 return rtrs_clt_recv_done(con, wc);
680 return rtrs_clt_rkey_rsp_done(con, wc);
696 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
699 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
703 struct rtrs_iu *iu = &con->rsp_ius[i];
705 err = rtrs_iu_post_recv(&con->c, iu);
707 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
733 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
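
Lines 696-733 pre-post q_size receives on each connection before I/O starts: real IUs when rkey responses are expected (con->rsp_ius is set), empty WRs otherwise. A condensed sketch of the loop:

static int example_post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
	size_t i;
	int err;

	for (i = 0; i < q_size; i++) {
		if (con->rsp_ius)
			err = rtrs_iu_post_recv(&con->c, &con->rsp_ius[i]);
		else
			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
		if (err)
			return err;
	}
	return 0;
}
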
963 req->con = rtrs_permit_to_clt_con(clt_path, permit);
1012 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1019 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1051 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1058 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1078 struct rtrs_clt_con *con = req->con;
1079 struct rtrs_path *s = con->c.path;
1152 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
1172 struct rtrs_clt_con *con = req->con;
1173 struct rtrs_path *s = con->c.path;
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1491 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1493 rtrs_rdma_error_recovery(con);
1541 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
1543 if (!clt_path->s.con)
1596 kfree(clt_path->s.con);
1607 kfree(clt_path->s.con);
1614 struct rtrs_clt_con *con;
1616 con = kzalloc(sizeof(*con), GFP_KERNEL);
1617 if (!con)
1621 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1622 con->c.cid = cid;
1623 con->c.path = &clt_path->s;
1625 atomic_set(&con->c.wr_cnt, 1);
1626 mutex_init(&con->con_mutex);
1628 clt_path->s.con[cid] = &con->c;
1633 static void destroy_con(struct rtrs_clt_con *con)
1635 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1637 clt_path->s.con[con->c.cid] = NULL;
1638 mutex_destroy(&con->con_mutex);
1639 kfree(con);
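
Lines 1614-1639 give one connection object its full lifecycle: allocation, CPU hint, cid and path back-pointer, registration in s.con[], and the mirror-image teardown. A condensed constructor sketch (error unwinding elided):

static int example_create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
	struct rtrs_clt_con *con;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	/* cid 0 is the user (admin) connection; IO cids spread over CPUs */
	con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
	con->c.cid = cid;
	con->c.path = &clt_path->s;
	atomic_set(&con->c.wr_cnt, 1);
	mutex_init(&con->con_mutex);

	clt_path->s.con[cid] = &con->c;
	return 0;
}
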
1642 static int create_con_cq_qp(struct rtrs_clt_con *con)
1644 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1649 lockdep_assert_held(&con->con_mutex);
1650 if (con->c.cid == 0) {
1661 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1702 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1705 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1706 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1711 if (!con->rsp_ius)
1713 con->queue_num = cq_num;
1715 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
1716 if (con->c.cid >= clt_path->s.irq_con_num)
1717 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1721 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1731 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1733 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1739 lockdep_assert_held(&con->con_mutex);
1740 rtrs_cq_qp_destroy(&con->c);
1741 if (con->rsp_ius) {
1742 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1743 con->queue_num);
1744 con->rsp_ius = NULL;
1745 con->queue_num = 0;
1753 static void stop_cm(struct rtrs_clt_con *con)
1755 rdma_disconnect(con->c.cm_id);
1756 if (con->c.qp)
1757 ib_drain_qp(con->c.qp);
1760 static void destroy_cm(struct rtrs_clt_con *con)
1762 rdma_destroy_id(con->c.cm_id);
1763 con->c.cm_id = NULL;
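
Lines 1753-1763 encode a strict teardown order: disconnect the CM id, drain the QP so no completions race the later frees, then destroy the id. Combined into one hypothetical helper:

static void example_stop_and_destroy_cm(struct rtrs_clt_con *con)
{
	rdma_disconnect(con->c.cm_id);
	if (con->c.qp)
		ib_drain_qp(con->c.qp);		/* flush outstanding completions */
	rdma_destroy_id(con->c.cm_id);
	con->c.cm_id = NULL;
}
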
1766 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1768 struct rtrs_path *s = con->c.path;
1771 mutex_lock(&con->con_mutex);
1772 err = create_con_cq_qp(con);
1773 mutex_unlock(&con->con_mutex);
1778 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1785 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1787 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1804 .cid = cpu_to_le16(con->c.cid),
1812 err = rdma_connect_locked(con->c.cm_id, &param);
1819 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1822 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1851 if (con->c.cid == 0) {
1898 clt_path->hca_port = con->c.cm_id->port_num;
1901 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1909 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1911 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1914 con->cm_err = 1;
1917 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1920 struct rtrs_path *s = con->c.path;
1927 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1928 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1958 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1960 if (con->cm_err == 1) {
1963 clt_path = to_clt_path(con->c.path);
1968 con->cm_err = cm_err;
1974 struct rtrs_clt_con *con = cm_id->context;
1975 struct rtrs_path *s = con->c.path;
1981 cm_err = rtrs_rdma_addr_resolved(con);
1984 cm_err = rtrs_rdma_route_resolved(con);
1987 cm_err = rtrs_rdma_conn_established(con, ev);
1993 flag_success_on_conn(con);
1999 cm_err = rtrs_rdma_conn_rejected(con, ev);
2037 flag_error_on_conn(con, cm_err);
2038 rtrs_rdma_error_recovery(con);
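
Lines 1974-2038 are the rdma_cm event dispatcher: the connection is stashed in cm_id->context at rdma_create_id() time, per-event helpers return a cm error, success flags the connection, and any error both flags it and kicks recovery. An abbreviated skeleton (the real handler covers more events; the default arm's errno is an assumption):

static int example_cm_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *ev)
{
	struct rtrs_clt_con *con = cm_id->context;
	int cm_err = 0;

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_err = rtrs_rdma_addr_resolved(con);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_err = rtrs_rdma_route_resolved(con);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		cm_err = rtrs_rdma_conn_established(con, ev);
		if (!cm_err)
			flag_success_on_conn(con);
		break;
	case RDMA_CM_EVENT_REJECTED:
		cm_err = rtrs_rdma_conn_rejected(con, ev);
		break;
	default:
		cm_err = -ECONNRESET;	/* assumed catch-all */
		break;
	}
	if (cm_err < 0) {
		flag_error_on_conn(con, cm_err);
		rtrs_rdma_error_recovery(con);
	}
	return 0;
}
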
2045 static int create_cm(struct rtrs_clt_con *con)
2047 struct rtrs_path *s = con->c.path;
2052 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2061 con->c.cm_id = cm_id;
2062 con->cm_err = 0;
2083 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2091 if (con->cm_err < 0)
2092 return con->cm_err;
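
Lines 2045-2092 show create_cm()'s shape: create the cm_id with the con as context, start address resolution, then sleep until the async handler reports success (cm_err == 1, set by flag_success_on_conn()) or failure (cm_err < 0), or the path leaves CONNECTING. In this sketch the port space/QP type, the dst_addr field, and the state_wq wait queue are assumptions:

static int example_create_cm(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rdma_cm_id *cm_id;
	int err;

	cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
			       RDMA_PS_TCP, IB_QPT_RC);	/* PS/QPT assumed */
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	con->c.cm_id = cm_id;
	con->cm_err = 0;

	err = rdma_resolve_addr(cm_id,
				(struct sockaddr *)&clt_path->s.src_addr,
				(struct sockaddr *)&clt_path->s.dst_addr,
				RTRS_CONNECT_TIMEOUT_MS);
	if (err)
		return err;

	/* The handler runs asynchronously; wait for its verdict */
	wait_event_interruptible_timeout(clt_path->state_wq,
		con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
		msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));

	return con->cm_err < 0 ? con->cm_err : 0;
}
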
2148 struct rtrs_clt_con *con;
2176 if (!clt_path->s.con[cid])
2178 con = to_clt_con(clt_path->s.con[cid]);
2179 stop_cm(con);
2198 if (!clt_path->s.con[cid])
2200 con = to_clt_con(clt_path->s.con[cid]);
2201 mutex_lock(&con->con_mutex);
2202 destroy_con_cq_qp(con);
2203 mutex_unlock(&con->con_mutex);
2204 destroy_cm(con);
2205 destroy_con(con);
2340 err = create_cm(to_clt_con(clt_path->s.con[cid]));
2353 struct rtrs_clt_con *con;
2355 if (!clt_path->s.con[i])
2358 con = to_clt_con(clt_path->s.con[i]);
2359 if (con->c.cm_id) {
2360 stop_cm(con);
2361 mutex_lock(&con->con_mutex);
2362 destroy_con_cq_qp(con);
2363 mutex_unlock(&con->con_mutex);
2364 destroy_cm(con);
2366 destroy_con(con);
2380 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2381 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2394 rtrs_clt_update_wc_stats(con);
2464 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2465 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2474 WARN_ON(con->c.cid);
2514 rtrs_clt_update_wc_stats(con);
2521 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
3044 struct rtrs_con *con;
3054 con = clt_path->s.con[index + 1];
3055 cnt = ib_process_cq_direct(con->cq, -1);
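
Lines 3044-3055 are the direct-poll path: index selects an I/O connection (cid 0 is reserved for the user connection, hence index + 1) and its CQ is processed inline with an unlimited budget. Reduced to a hypothetical helper:

static int example_poll_io_con(struct rtrs_clt_path *clt_path,
			       unsigned int index)
{
	struct rtrs_con *con = clt_path->s.con[index + 1];

	/* -1: no budget, drain everything currently in the CQ */
	return ib_process_cq_direct(con->cq, -1);
}
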