Lines matching defs:qhp — every line referencing qhp in the cxgb4 iWARP driver's QP code (drivers/infiniband/hw/cxgb4/qp.c), each prefixed with its source line number. Intervening lines that do not mention qhp are omitted, which is why many fragments below are truncated mid-statement.

87 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
90 spin_lock_irqsave(&qhp->lock, flag);
91 qhp->attr.state = state;
92 spin_unlock_irqrestore(&qhp->lock, flag);
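
The grep keeps only the lines naming qhp, dropping the braces and the flag declaration. A minimal reconstruction of the whole helper, assuming the omitted lines are just boilerplate:

    static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
    {
            unsigned long flag;

            /* attr.state is also touched from interrupt context (CQ and
             * async-event handlers), hence the irqsave lock variant. */
            spin_lock_irqsave(&qhp->lock, flag);
            qhp->attr.state = state;
            spin_unlock_irqrestore(&qhp->lock, flag);
    }
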
690 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
693 qhp->sq_sig_all;
695 qhp->sq_sig_all;
706 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
707 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
711 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
713 swsqe->idx = qhp->wq.sq.pidx;
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
724 write_wrid = qhp->wq.sq.pidx;
727 qhp->wq.sq.in_use++;
728 if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
729 qhp->wq.sq.pidx = 0;
732 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
737 swsqe->idx = qhp->wq.sq.pidx;
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
749 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
753 t4_sq_produce(&qhp->wq, len16);
756 t4_ring_sq_db(&qhp->wq, idx, wqe);
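
post_write_cmpl() coalesces an RDMA WRITE and the SEND that follows it into one WRITE_CMPL hardware WQE, while still recording two software SQ entries (lines 711 and 732) so each user WR gets its own completion. The wraparound at lines 728-729 is the standard circular-queue produce step; a generic sketch of that idiom (names illustrative, not from the driver):

    /* Advance a ring producer index, wrapping at the queue size. */
    static inline u16 ring_advance(u16 pidx, u16 size)
    {
            return (pidx + 1 == size) ? 0 : pidx + 1;
    }
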
759 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
764 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
765 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
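
build_rdma_recv() hands build_isgl() both the start of the RQ ring and the first address past its end (lines 764-765) so the scatter/gather list can be written across the wrap point. A sketch of the wrap test build_isgl() presumably applies (cursor and value names assumed):

    /* Emit one 64-bit flit and wrap the cursor at the end of the ring. */
    *flitp = cpu_to_be64(sge_word);        /* sge_word: illustrative value */
    if (++flitp == queue_end)
            flitp = queue_start;
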
912 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
916 xa_lock_irqsave(&qhp->rhp->qps, flags);
917 spin_lock(&qhp->lock);
918 if (qhp->rhp->db_state == NORMAL)
919 t4_ring_sq_db(&qhp->wq, inc, NULL);
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
922 qhp->wq.sq.wq_pidx_inc += inc;
924 spin_unlock(&qhp->lock);
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags);
929 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
933 xa_lock_irqsave(&qhp->rhp->qps, flags);
934 spin_lock(&qhp->lock);
935 if (qhp->rhp->db_state == NORMAL)
936 t4_ring_rq_db(&qhp->wq, inc, NULL);
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
939 qhp->wq.rq.wq_pidx_inc += inc;
941 spin_unlock(&qhp->lock);
942 xa_unlock_irqrestore(&qhp->rhp->qps, flags);
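
Both doorbell helpers share a flow-control pattern: ring the hardware doorbell only while the adapter's doorbell state is NORMAL; otherwise park the QP on db_fc_list and accumulate the producer increment for the recovery logic to replay later. A reconstruction of ring_kernel_sq_db() with the else branch the grep omits (lines 912-925; the else keyword itself never names qhp):

    xa_lock_irqsave(&qhp->rhp->qps, flags);
    spin_lock(&qhp->lock);
    if (qhp->rhp->db_state == NORMAL)
            t4_ring_sq_db(&qhp->wq, inc, NULL);     /* fast path: ring now */
    else {
            add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
            qhp->wq.sq.wq_pidx_inc += inc;          /* defer: remember the increment */
    }
    spin_unlock(&qhp->lock);
    xa_unlock_irqrestore(&qhp->rhp->qps, flags);
    return 0;

ring_kernel_rq_db() (lines 929-942) is identical except that it rings and accumulates on the RQ side.
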
979 static int complete_sq_drain_wr(struct c4iw_qp *qhp,
988 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1001 CQE_QPID_V(qhp->wq.sq.qid));
1018 static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
1025 ret = complete_sq_drain_wr(qhp, wr);
1035 static void complete_rq_drain_wr(struct c4iw_qp *qhp,
1043 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1052 CQE_QPID_V(qhp->wq.sq.qid));
1068 static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
1072 complete_rq_drain_wr(qhp, wr);
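
The drain helpers handle WRs posted after the QP has been flushed: such a WR never reaches hardware, so the driver fabricates a software completion with status T4_ERR_SWFLUSH directly into the CQ. A hedged reconstruction of the CQE header built around line 1052 (the non-qhp lines here are recalled from the surrounding source and may vary by kernel version):

    cqe.u.drain_cookie = wr->wr_id;
    cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                             CQE_OPCODE_V(FW_RI_SEND) |
                             CQE_TYPE_V(0) |            /* 0: RQ completion */
                             CQE_SWCQE_V(1) |
                             CQE_QPID_V(qhp->wq.sq.qid));
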
1084 struct c4iw_qp *qhp;
1092 qhp = to_c4iw_qp(ibqp);
1093 rhp = qhp->rhp;
1094 spin_lock_irqsave(&qhp->lock, flag);
1100 if (qhp->wq.flushed) {
1101 spin_unlock_irqrestore(&qhp->lock, flag);
1102 err = complete_sq_drain_wrs(qhp, wr, bad_wr);
1105 num_wrs = t4_sq_avail(&qhp->wq);
1107 spin_unlock_irqrestore(&qhp->lock, flag);
1121 if (qhp->rhp->rdev.lldi.write_cmpl_support &&
1122 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
1131 post_write_cmpl(qhp, wr);
1132 spin_unlock_irqrestore(&qhp->lock, flag);
1142 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
1143 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
1148 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
1150 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
1161 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
1173 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
1189 if (!qhp->wq.sq.oldest_read)
1190 qhp->wq.sq.oldest_read = swsqe;
1203 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
1229 swsqe->idx = qhp->wq.sq.pidx;
1232 qhp->sq_sig_all;
1241 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
1244 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
1248 t4_sq_produce(&qhp->wq, len16);
1252 t4_ring_sq_db(&qhp->wq, idx, wqe);
1253 spin_unlock_irqrestore(&qhp->lock, flag);
1255 spin_unlock_irqrestore(&qhp->lock, flag);
1256 ring_kernel_sq_db(qhp, idx);
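
The tail of c4iw_post_send() picks between two doorbell paths; a reconstruction including the status-page test (around line 1251) that the grep drops:

    if (!qhp->rhp->rdev.status_page->db_off) {
            t4_ring_sq_db(&qhp->wq, idx, wqe);      /* direct BAR2 doorbell, lock held */
            spin_unlock_irqrestore(&qhp->lock, flag);
    } else {
            spin_unlock_irqrestore(&qhp->lock, flag);
            ring_kernel_sq_db(qhp, idx);            /* flow-controlled kernel doorbell */
    }

When the firmware has turned doorbells off (db_off), the lock is dropped first and the flow-controlled helper shown above at lines 912-925 takes over. The receive path ends the same way (lines 1331-1336).
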
1265 struct c4iw_qp *qhp;
1272 qhp = to_c4iw_qp(ibqp);
1273 spin_lock_irqsave(&qhp->lock, flag);
1279 if (qhp->wq.flushed) {
1280 spin_unlock_irqrestore(&qhp->lock, flag);
1281 complete_rq_drain_wrs(qhp, wr);
1284 num_wrs = t4_rq_avail(&qhp->wq);
1286 spin_unlock_irqrestore(&qhp->lock, flag);
1296 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1297 qhp->wq.rq.wq_pidx *
1300 err = build_rdma_recv(qhp, wqe, wr, &len16);
1308 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1310 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1312 qhp->rhp->rdev.lldi.ports[0]);
1313 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
1319 wqe->recv.wrid = qhp->wq.rq.pidx;
1325 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1326 t4_rq_produce(&qhp->wq, len16);
1331 if (!qhp->rhp->rdev.status_page->db_off) {
1332 t4_ring_rq_db(&qhp->wq, idx, wqe);
1333 spin_unlock_irqrestore(&qhp->lock, flag);
1335 spin_unlock_irqrestore(&qhp->lock, flag);
1336 ring_kernel_rq_db(qhp, idx);
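
Per-WR receive bookkeeping, reconstructed around lines 1308-1313; the c4iw_wr_log guard and the ktime_get() call never mention qhp, so they are assumptions from the surrounding source:

    qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
    if (c4iw_wr_log) {              /* module parameter; assumed guard */
            qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
                    cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
            qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time = ktime_get();
    }
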
1560 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1568 qhp->ep->hwtid);
1570 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1574 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1579 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1585 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1586 term->layer_etype = qhp->attr.layer_etype;
1587 term->ecode = qhp->attr.ecode;
1590 c4iw_ofld_send(&qhp->rhp->rdev, skb);
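
post_terminate() either forwards termination codes already latched in the QP attributes (the TERMINATE transition at lines 1949-1950 stores them) or derives them from the offending CQE; a reconstruction with the else branch the grep omits (around line 1585):

    if (qhp->attr.layer_etype == (LAYER_MPA | DDP_LLP)) {
            term->layer_etype = qhp->attr.layer_etype;
            term->ecode = qhp->attr.ecode;
    } else
            build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
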
1594 * Assumes qhp lock is held.
1596 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1603 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1609 spin_lock(&qhp->lock);
1611 if (qhp->wq.flushed) {
1612 spin_unlock(&qhp->lock);
1618 qhp->wq.flushed = 1;
1619 t4_set_wq_in_error(&qhp->wq, 0);
1621 c4iw_flush_hw_cq(rchp, qhp);
1622 if (!qhp->srq) {
1623 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1624 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1628 c4iw_flush_hw_cq(schp, qhp);
1629 sq_flushed = c4iw_flush_sq(qhp);
1631 spin_unlock(&qhp->lock);
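
Note that the comment at line 1594 ("Assumes qhp lock is held") presumably refers to qhp->mutex, which c4iw_modify_qp() holds around flush_qp(); __flush_qp() itself takes qhp->lock at line 1609. The CQ locks taken just before that never name qhp, so the grep hides them; the ordering, reconstructed:

    /* Locking hierarchy: CQ locks first, then the QP lock (line 1609). */
    spin_lock_irqsave(&rchp->lock, flag);
    if (schp != rchp)
            spin_lock(&schp->lock);
    spin_lock(&qhp->lock);
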
1660 static void flush_qp(struct c4iw_qp *qhp)
1665 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1666 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1668 if (qhp->ibqp.uobject) {
1670 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1671 if (qhp->wq.flushed)
1674 qhp->wq.flushed = 1;
1675 t4_set_wq_in_error(&qhp->wq, 0);
1689 __flush_qp(qhp, rchp, schp);
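
For user QPs, flush_qp() cannot walk the queues (they live in user-mapped memory), so it only marks the WQ in error (lines 1674-1675), marks both CQs in error, and kicks their completion handlers before returning; kernel QPs fall through to __flush_qp(). A hedged sketch of the per-CQ notification the grep omits (handler locking assumed from the surrounding source):

    t4_set_cq_in_error(&rchp->cq);
    spin_lock_irqsave(&rchp->comp_handler_lock, flag);
    (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
    spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
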
1692 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1719 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1749 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1755 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1756 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1763 ret = alloc_ird(rhp, qhp->attr.max_ird);
1765 qhp->attr.max_ird = 0;
1769 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1776 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1779 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1783 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1784 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1786 if (qhp->attr.mpa_attr.recv_marker_enabled)
1788 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1790 if (qhp->attr.mpa_attr.crc_enabled)
1796 if (!qhp->ibqp.uobject)
1799 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1800 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1801 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1802 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1803 if (qhp->srq) {
1805 qhp->srq->idx);
1807 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1808 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1809 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1812 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1813 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1814 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1815 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1816 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1817 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1818 if (qhp->attr.mpa_attr.initiator)
1819 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1822 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1826 free_ird(rhp, qhp->attr.max_ird);
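
rdma_init() translates the negotiated MPA attributes into FW_RI_WR flags, but only the condition lines naming qhp survive the grep (1786, 1788, 1790). A reconstruction with the assignments filled in (macro names recalled from t4fw_ri_api.h; treat them as assumptions):

    wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
    if (qhp->attr.mpa_attr.recv_marker_enabled)
            wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
    if (qhp->attr.mpa_attr.xmit_marker_enabled)
            wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
    if (qhp->attr.mpa_attr.crc_enabled)
            wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
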
1832 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1838 struct c4iw_qp_attributes newattr = qhp->attr;
1845 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1846 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1849 mutex_lock(&qhp->mutex);
1853 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1877 qhp->attr = newattr;
1881 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1885 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1891 if (qhp->attr.state == attrs->next_state)
1894 switch (qhp->attr.state) {
1906 qhp->attr.mpa_attr = attrs->mpa_attr;
1907 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1908 qhp->ep = qhp->attr.llp_stream_handle;
1909 set_state(qhp, C4IW_QP_STATE_RTS);
1917 c4iw_get_ep(&qhp->ep->com);
1918 ret = rdma_init(rhp, qhp);
1923 set_state(qhp, C4IW_QP_STATE_ERROR);
1924 flush_qp(qhp);
1934 t4_set_wq_in_error(&qhp->wq, 0);
1935 set_state(qhp, C4IW_QP_STATE_CLOSING);
1936 ep = qhp->ep;
1940 c4iw_get_ep(&qhp->ep->com);
1942 ret = rdma_fini(rhp, qhp, ep);
1947 t4_set_wq_in_error(&qhp->wq, 0);
1948 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1949 qhp->attr.layer_etype = attrs->layer_etype;
1950 qhp->attr.ecode = attrs->ecode;
1951 ep = qhp->ep;
1957 terminate = qhp->attr.send_term;
1958 ret = rdma_fini(rhp, qhp, ep);
1964 t4_set_wq_in_error(&qhp->wq, 0);
1965 set_state(qhp, C4IW_QP_STATE_ERROR);
1969 ep = qhp->ep;
1970 c4iw_get_ep(&qhp->ep->com);
1984 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1991 flush_qp(qhp);
1992 set_state(qhp, C4IW_QP_STATE_IDLE);
1993 qhp->attr.llp_stream_handle = NULL;
1994 c4iw_put_ep(&qhp->ep->com);
1995 qhp->ep = NULL;
1996 wake_up(&qhp->wait);
2010 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
2014 set_state(qhp, C4IW_QP_STATE_IDLE);
2024 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
2031 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
2032 qhp->wq.sq.qid);
2035 qhp->attr.llp_stream_handle = NULL;
2037 ep = qhp->ep;
2038 qhp->ep = NULL;
2039 set_state(qhp, C4IW_QP_STATE_ERROR);
2042 flush_qp(qhp);
2043 wake_up(&qhp->wait);
2045 mutex_unlock(&qhp->mutex);
2048 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
2067 pr_debug("exit state %d\n", qhp->attr.state);
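
The grep strips the switch/case labels out of c4iw_modify_qp(), but the state machine is still recoverable from the surviving lines; a summary sketch (source line references as above):

    /*
     * IDLE    -> RTS        rdma_init(), ep attached            (1906-1918)
     * IDLE    -> ERROR      flush_qp()                          (1923-1924)
     * RTS     -> CLOSING    wq marked in error, rdma_fini()     (1934-1942)
     * RTS     -> TERMINATE  term codes latched, post_terminate()
     *                       if attr.send_term is set            (1947-1958, 2048)
     * RTS     -> ERROR      wq marked in error, rdma_fini()     (1964-1970)
     * CLOSING -> IDLE       flush_qp(), ep ref dropped,
     *                       waiters woken                       (1991-1996)
     * on error: disassociate the ep, force ERROR, flush, wake   (2031-2043)
     */
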
2074 struct c4iw_qp *qhp;
2078 qhp = to_c4iw_qp(ib_qp);
2079 rhp = qhp->rhp;
2080 ucontext = qhp->ucontext;
2083 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
2084 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2086 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
2087 wait_event(qhp->wait, !qhp->ep);
2090 __xa_erase(&rhp->qps, qhp->wq.sq.qid);
2091 if (!list_empty(&qhp->db_fc_entry))
2092 list_del_init(&qhp->db_fc_entry);
2094 free_ird(rhp, qhp->attr.max_ird);
2098 wait_for_completion(&qhp->qp_rel_comp);
2100 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
2101 pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
2103 destroy_qp(&rhp->rdev, &qhp->wq,
2104 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
2106 c4iw_put_wr_wait(qhp->wr_waitp);
2108 kfree(qhp);
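
Teardown ordering in c4iw_destroy_qp(): force the QP to ERROR (lines 2083-2086), wait for the ep to detach (2087), unpublish the qid from the xarray (2090), then block until the last reference is gone (2098) before destroying the hardware queues and freeing. The release hook that signals the completion does not appear in this listing; a hedged sketch of what presumably pairs with the refcount_set()/init_completion() at lines 2218-2219 (function name assumed):

    void c4iw_qp_rem_ref(struct ib_qp *qp)
    {
            struct c4iw_qp *qhp = to_c4iw_qp(qp);

            if (refcount_dec_and_test(&qhp->qp_refcnt))
                    complete(&qhp->qp_rel_comp);
    }
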
2116 struct c4iw_qp *qhp;
2157 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
2158 if (!qhp)
2161 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2162 if (!qhp->wr_waitp) {
2167 qhp->wq.sq.size = sqsize;
2168 qhp->wq.sq.memsize =
2170 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
2171 qhp->wq.sq.flush_cidx = -1;
2173 qhp->wq.rq.size = rqsize;
2174 qhp->wq.rq.memsize =
2176 sizeof(*qhp->wq.rq.queue);
2180 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
2182 qhp->wq.rq.memsize =
2183 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
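
Queue sizing, reconstructed: the middle lines (2169, 2175) that the grep drops presumably add the hardware status entries to the requested depth, and the sizes are then page-aligned for mmap:

    /* sq.memsize = (sqsize + eq_status_entries) * sizeof(*sq.queue)
     *              + 16 * sizeof(__be64)            (lines 2168-2170)
     * rq.memsize = (rqsize + eq_status_entries) * sizeof(*rq.queue)
     *                                               (lines 2174-2176)
     * both rounded up to PAGE_SIZE, per lines 2180-2183.
     */
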
2186 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
2188 qhp->wr_waitp, !attrs->srq);
2196 qhp->rhp = rhp;
2197 qhp->attr.pd = php->pdid;
2198 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2199 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2200 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
2201 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2202 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
2204 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2205 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2207 qhp->attr.state = C4IW_QP_STATE_IDLE;
2208 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2209 qhp->attr.enable_rdma_read = 1;
2210 qhp->attr.enable_rdma_write = 1;
2211 qhp->attr.enable_bind = 1;
2212 qhp->attr.max_ord = 0;
2213 qhp->attr.max_ird = 0;
2214 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
2215 spin_lock_init(&qhp->lock);
2216 mutex_init(&qhp->mutex);
2217 init_waitqueue_head(&qhp->wait);
2218 init_completion(&qhp->qp_rel_comp);
2219 refcount_set(&qhp->qp_refcnt, 1);
2221 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
2252 if (t4_sq_onchip(&qhp->wq.sq)) {
2264 uresp.sqid = qhp->wq.sq.qid;
2265 uresp.sq_size = qhp->wq.sq.size;
2266 uresp.sq_memsize = qhp->wq.sq.memsize;
2268 uresp.rqid = qhp->wq.rq.qid;
2269 uresp.rq_size = qhp->wq.rq.size;
2270 uresp.rq_memsize = qhp->wq.rq.memsize;
2294 sq_key_mm->addr = qhp->wq.sq.phys_addr;
2295 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2299 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
2300 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2304 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2310 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2323 qhp->ucontext = ucontext;
2326 qhp->wq.qp_errp =
2327 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2329 qhp->wq.qp_errp =
2330 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2331 qhp->wq.srqidxp =
2332 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2335 qhp->ibqp.qp_num = qhp->wq.sq.qid;
2337 qhp->srq = to_c4iw_srq(attrs->srq);
2338 INIT_LIST_HEAD(&qhp->db_fc_entry);
2340 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2341 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2342 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
2343 return &qhp->ibqp;
2357 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
2359 destroy_qp(&rhp->rdev, &qhp->wq,
2362 c4iw_put_wr_wait(qhp->wr_waitp);
2364 kfree(qhp);
2372 struct c4iw_qp *qhp;
2386 qhp = to_c4iw_qp(ibqp);
2387 rhp = qhp->rhp;
2416 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2467 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2471 attr->qp_state = to_ib_qp_state(qhp->attr.state);
2472 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
2473 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2474 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2475 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
2476 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
2478 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
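
One hedged observation on line 2478: in enum ib_sig_type, IB_SIGNAL_ALL_WR is the first enumerator and so has value 0, which makes both arms of the ternary evaluate to the same thing. IB_SIGNAL_REQ_WR was presumably intended for the else arm:

    init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR
                                             : IB_SIGNAL_REQ_WR;
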