Lines Matching refs:qp
57 #include "qp.h"
64 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
393 * rvt_driver_qp_init - Init driver qp resources
457 * rvt_free_qp_cb - callback function to reset a qp
458 * @qp: the qp to reset
461 * This function resets the qp and removes it from the
462 * qp hash table.
464 static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
467 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
469 /* Reset the qp and remove it from the qp hash list */
470 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
499 * Check for qp leaks and free resources.
616 * @qp: rvt qp data structure
619 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
622 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
624 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
625 rvt_put_ss(&qp->s_rdma_read_sge);
627 rvt_put_ss(&qp->r_sge);
630 while (qp->s_last != qp->s_head) {
631 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
633 rvt_put_qp_swqe(qp, wqe);
634 if (++qp->s_last >= qp->s_size)
635 qp->s_last = 0;
638 if (qp->s_rdma_mr) {
639 rvt_put_mr(qp->s_rdma_mr);
640 qp->s_rdma_mr = NULL;
644 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
645 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
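
The loop at lines 630-635 above releases each send work request's MR references by walking the send queue as a ring: the index advances from s_last toward s_head and wraps back to zero at s_size. A minimal user-space sketch of that wrap-around walk (release_slot, ring_release and the slot numbers are illustrative, not taken from the driver):

    #include <stdio.h>

    /* Illustrative stand-in for dropping one work-queue entry's references. */
    static void release_slot(unsigned int idx)
    {
        printf("release slot %u\n", idx);
    }

    /* Walk a ring of 'size' slots from 'last' up to (but not including) 'head',
     * wrapping the index as the driver does for qp->s_last. */
    static void ring_release(unsigned int last, unsigned int head, unsigned int size)
    {
        while (last != head) {
            release_slot(last);
            if (++last >= size)
                last = 0;
        }
    }

    int main(void)
    {
        ring_release(6, 2, 8);  /* releases slots 6, 7, 0, 1 */
        return 0;
    }
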
675 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
676 * @qp: the rvt_qp
679 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
681 u32 s_last = qp->s_last;
683 while (s_last != qp->s_head) {
684 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
689 if (++s_last >= qp->s_size)
692 if (qp->s_rdma_mr)
693 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
700 * @qp: the qp
703 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
706 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
708 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
709 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
719 * @qp: the qp
723 * the qp.
725 * If so, the qp is put into an error state to eliminate
726 * any references from the qp.
728 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
732 if (qp->ibqp.qp_type == IB_QPT_SMI ||
733 qp->ibqp.qp_type == IB_QPT_GSI)
736 spin_lock_irq(&qp->r_lock);
737 spin_lock(&qp->s_hlock);
738 spin_lock(&qp->s_lock);
740 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
743 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
744 rvt_qp_sends_has_lkey(qp, lkey) ||
745 rvt_qp_acks_has_lkey(qp, lkey))
746 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
748 spin_unlock(&qp->s_lock);
749 spin_unlock(&qp->s_hlock);
750 spin_unlock_irq(&qp->r_lock);
754 ev.device = qp->ibqp.device;
755 ev.element.qp = &qp->ibqp;
757 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
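
Note the lock discipline at lines 736-750: r_lock is taken first with interrupts disabled, then s_hlock, then s_lock, and they are released in the reverse order. A hedged sketch of that nesting using pthread mutexes in place of the kernel spinlocks (with_qp_locks and the mutex names are illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t r_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t s_hlock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t s_lock  = PTHREAD_MUTEX_INITIALIZER;

    /* Acquire in r_lock -> s_hlock -> s_lock order and release in reverse,
     * mirroring the spin_lock_irq()/spin_lock() nesting in the driver. */
    static void with_qp_locks(void (*body)(void))
    {
        pthread_mutex_lock(&r_lock);   /* spin_lock_irq(&qp->r_lock) */
        pthread_mutex_lock(&s_hlock);  /* spin_lock(&qp->s_hlock)    */
        pthread_mutex_lock(&s_lock);   /* spin_lock(&qp->s_lock)     */
        body();
        pthread_mutex_unlock(&s_lock);
        pthread_mutex_unlock(&s_hlock);
        pthread_mutex_unlock(&r_lock);
    }

    static void scan_body(void) { printf("lkey scan under all three locks\n"); }

    int main(void) { with_qp_locks(scan_body); return 0; }
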
762 * rvt_remove_qp - remove qp from table
764 * @qp: qp to remove
769 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
771 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
772 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
778 if (rcu_dereference_protected(rvp->qp[0],
779 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
780 RCU_INIT_POINTER(rvp->qp[0], NULL);
781 } else if (rcu_dereference_protected(rvp->qp[1],
782 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
783 RCU_INIT_POINTER(rvp->qp[1], NULL);
793 if (q == qp) {
795 rcu_dereference_protected(qp->next,
798 trace_rvt_qpremove(qp, n);
807 rvt_put_qp(qp);
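
The bucket index at line 772 (and again in rvt_insert_qp, line 1439) is hash_32() of the QP number, keeping qp_table_bits bits of the result. A stand-alone sketch of that multiplicative-hash bucket computation; the constant shown is the 32-bit golden-ratio value used by recent kernels, so treat the exact constant as an assumption, and the qpn/table_bits values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* hash_32()-style multiplicative hash: multiply by a 32-bit golden-ratio
     * constant and keep the top 'bits' bits as the bucket index. */
    static uint32_t hash32_sketch(uint32_t val, unsigned int bits)
    {
        return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
    }

    int main(void)
    {
        uint32_t qpn = 0x1234;        /* hypothetical QP number     */
        unsigned int table_bits = 8;  /* hypothetical qp_table_bits */

        printf("bucket = %u\n", hash32_sketch(qpn, table_bits));
        return 0;
    }
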
854 * @qp: the QP to init or reinit
862 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
865 qp->remote_qpn = 0;
866 qp->qkey = 0;
867 qp->qp_access_flags = 0;
868 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
869 qp->s_hdrwords = 0;
870 qp->s_wqe = NULL;
871 qp->s_draining = 0;
872 qp->s_next_psn = 0;
873 qp->s_last_psn = 0;
874 qp->s_sending_psn = 0;
875 qp->s_sending_hpsn = 0;
876 qp->s_psn = 0;
877 qp->r_psn = 0;
878 qp->r_msn = 0;
880 qp->s_state = IB_OPCODE_RC_SEND_LAST;
881 qp->r_state = IB_OPCODE_RC_SEND_LAST;
883 qp->s_state = IB_OPCODE_UC_SEND_LAST;
884 qp->r_state = IB_OPCODE_UC_SEND_LAST;
886 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
887 qp->r_nak_state = 0;
888 qp->r_aflags = 0;
889 qp->r_flags = 0;
890 qp->s_head = 0;
891 qp->s_tail = 0;
892 qp->s_cur = 0;
893 qp->s_acked = 0;
894 qp->s_last = 0;
895 qp->s_ssn = 1;
896 qp->s_lsn = 0;
897 qp->s_mig_state = IB_MIG_MIGRATED;
898 qp->r_head_ack_queue = 0;
899 qp->s_tail_ack_queue = 0;
900 qp->s_acked_ack_queue = 0;
901 qp->s_num_rd_atomic = 0;
902 qp->r_sge.num_sge = 0;
903 atomic_set(&qp->s_reserved_used, 0);
908 * @qp: the QP to reset
913 static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
915 __must_hold(&qp->s_lock)
916 __must_hold(&qp->s_hlock)
917 __must_hold(&qp->r_lock)
919 lockdep_assert_held(&qp->r_lock);
920 lockdep_assert_held(&qp->s_hlock);
921 lockdep_assert_held(&qp->s_lock);
922 if (qp->state != IB_QPS_RESET) {
923 qp->state = IB_QPS_RESET;
926 rdi->driver_f.flush_qp_waiters(qp);
927 rvt_stop_rc_timers(qp);
928 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
929 spin_unlock(&qp->s_lock);
930 spin_unlock(&qp->s_hlock);
931 spin_unlock_irq(&qp->r_lock);
934 rdi->driver_f.stop_send_queue(qp);
935 rvt_del_timers_sync(qp);
937 rdi->driver_f.quiesce_qp(qp);
939 /* take qp out of the hash and wait for it to be unused */
940 rvt_remove_qp(rdi, qp);
943 spin_lock_irq(&qp->r_lock);
944 spin_lock(&qp->s_hlock);
945 spin_lock(&qp->s_lock);
947 rvt_clear_mr_refs(qp, 1);
950 * a qp that has been reset
952 rdi->driver_f.notify_qp_reset(qp);
954 rvt_init_qp(rdi, qp, type);
955 lockdep_assert_held(&qp->r_lock);
956 lockdep_assert_held(&qp->s_hlock);
957 lockdep_assert_held(&qp->s_lock);
963 * @qp: the QP to reset
969 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
972 spin_lock_irq(&qp->r_lock);
973 spin_lock(&qp->s_hlock);
974 spin_lock(&qp->s_lock);
975 _rvt_reset_qp(rdi, qp, type);
976 spin_unlock(&qp->s_lock);
977 spin_unlock(&qp->s_hlock);
978 spin_unlock_irq(&qp->r_lock);
1009 * @qp: Valid QP with allowed_ops set
1014 static void free_ud_wq_attr(struct rvt_qp *qp)
1019 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1020 wqe = rvt_get_swqe_ptr(qp, i);
1028 * @qp: Valid QP with allowed_ops set
1034 static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
1039 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1040 wqe = rvt_get_swqe_ptr(qp, i);
1044 free_ud_wq_attr(qp);
1070 struct rvt_qp *qp;
1121 sz = sizeof(*qp);
1127 sg_list_sz = sizeof(*qp->r_sg_list) *
1130 sg_list_sz = sizeof(*qp->r_sg_list) *
1132 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
1134 if (!qp)
1136 qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1138 RCU_INIT_POINTER(qp->next, NULL);
1140 qp->s_ack_queue =
1142 sizeof(*qp->s_ack_queue),
1145 if (!qp->s_ack_queue)
1148 /* initialize timers needed for rc qp */
1149 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1150 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1152 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1158 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1163 qp->priv = priv;
1164 qp->timeout_jiffies =
1165 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1170 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1171 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1172 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1174 err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
1183 * ib_create_qp() will initialize qp->ibqp
1184 * except for qp->ibqp.qp_num.
1186 spin_lock_init(&qp->r_lock);
1187 spin_lock_init(&qp->s_hlock);
1188 spin_lock_init(&qp->s_lock);
1189 atomic_set(&qp->refcount, 0);
1190 atomic_set(&qp->local_ops_pending, 0);
1191 init_waitqueue_head(&qp->wait);
1192 INIT_LIST_HEAD(&qp->rspwait);
1193 qp->state = IB_QPS_RESET;
1194 qp->s_wq = swq;
1195 qp->s_size = sqsize;
1196 qp->s_avail = init_attr->cap.max_send_wr;
1197 qp->s_max_sge = init_attr->cap.max_send_sge;
1199 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1200 err = alloc_ud_wq_attr(qp, rdi->dparms.node);
1217 qp->ibqp.qp_num = err;
1219 qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
1220 qp->port_num = init_attr->port_num;
1221 rvt_init_qp(rdi, qp, init_attr->qp_type);
1223 err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1243 if (!qp->r_rq.wq) {
1253 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1255 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1256 qp->r_rq.wq);
1257 if (IS_ERR(qp->ip)) {
1258 ret = ERR_CAST(qp->ip);
1262 err = ib_copy_to_udata(udata, &qp->ip->offset,
1263 sizeof(qp->ip->offset));
1269 qp->pid = current->pid;
1285 * qps. busy_jiffies is incremented every rc qp scaling interval.
1295 if (qp->ip) {
1297 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1301 ret = &qp->ibqp;
1306 if (qp->ip)
1307 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1310 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1313 free_ud_wq_attr(qp);
1316 rvt_free_rq(&qp->r_rq);
1319 rdi->driver_f.qp_priv_free(rdi, qp);
1322 kfree(qp->s_ack_queue);
1323 kfree(qp);
1333 * @qp: the QP to put into the error state
1342 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1346 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1348 lockdep_assert_held(&qp->r_lock);
1349 lockdep_assert_held(&qp->s_lock);
1350 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1353 qp->state = IB_QPS_ERR;
1355 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1356 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1357 del_timer(&qp->s_timer);
1360 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1361 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1363 rdi->driver_f.notify_error_qp(qp);
1366 if (READ_ONCE(qp->s_last) != qp->s_head)
1367 rdi->driver_f.schedule_send(qp);
1369 rvt_clear_mr_refs(qp, 0);
1372 wc.qp = &qp->ibqp;
1375 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1376 wc.wr_id = qp->r_wr_id;
1378 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1382 if (qp->r_rq.kwq) {
1388 spin_lock(&qp->r_rq.kwq->c_lock);
1389 /* qp->ip used to validate if there is a user buffer mmapped */
1390 if (qp->ip) {
1391 wq = qp->r_rq.wq;
1395 kwq = qp->r_rq.kwq;
1400 if (head >= qp->r_rq.size)
1402 if (tail >= qp->r_rq.size)
1405 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1406 if (++tail >= qp->r_rq.size)
1408 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1410 if (qp->ip)
1414 spin_unlock(&qp->r_rq.kwq->c_lock);
1415 } else if (qp->ibqp.event_handler) {
1428 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1430 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1433 rvt_get_qp(qp);
1436 if (qp->ibqp.qp_num <= 1) {
1437 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1439 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1441 qp->next = rdi->qp_dev->qp_table[n];
1442 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1443 trace_rvt_qpinsert(qp, n);
1462 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1470 spin_lock_irq(&qp->r_lock);
1471 spin_lock(&qp->s_hlock);
1472 spin_lock(&qp->s_lock);
1475 attr->cur_qp_state : qp->state;
1477 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1484 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1498 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1513 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1528 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1529 qp->ibqp.qp_type == IB_QPT_GSI ||
1549 * that to a small mtu. We'll set qp->path_mtu
1555 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1562 if (qp->s_mig_state == IB_MIG_ARMED)
1567 if (qp->s_mig_state == IB_MIG_REARM)
1571 if (qp->s_mig_state == IB_MIG_ARMED)
1584 if (qp->state != IB_QPS_RESET)
1585 _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1590 qp->r_flags &= ~RVT_R_COMM_EST;
1591 qp->state = new_state;
1595 qp->s_draining = qp->s_last != qp->s_cur;
1596 qp->state = new_state;
1600 if (qp->ibqp.qp_type == IB_QPT_RC)
1602 qp->state = new_state;
1606 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1610 qp->state = new_state;
1615 qp->s_pkey_index = attr->pkey_index;
1618 qp->port_num = attr->port_num;
1621 qp->remote_qpn = attr->dest_qp_num;
1624 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1625 qp->s_psn = qp->s_next_psn;
1626 qp->s_sending_psn = qp->s_next_psn;
1627 qp->s_last_psn = qp->s_next_psn - 1;
1628 qp->s_sending_hpsn = qp->s_last_psn;
1632 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1635 qp->qp_access_flags = attr->qp_access_flags;
1638 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1639 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1640 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1644 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1645 qp->s_alt_pkey_index = attr->alt_pkey_index;
1649 qp->s_mig_state = attr->path_mig_state;
1651 qp->remote_ah_attr = qp->alt_ah_attr;
1652 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1653 qp->s_pkey_index = qp->s_alt_pkey_index;
1658 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1659 qp->log_pmtu = ilog2(qp->pmtu);
1663 qp->s_retry_cnt = attr->retry_cnt;
1664 qp->s_retry = attr->retry_cnt;
1668 qp->s_rnr_retry_cnt = attr->rnr_retry;
1669 qp->s_rnr_retry = attr->rnr_retry;
1673 qp->r_min_rnr_timer = attr->min_rnr_timer;
1676 qp->timeout = attr->timeout;
1677 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1681 qp->qkey = attr->qkey;
1684 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1687 qp->s_max_rd_atomic = attr->max_rd_atomic;
1690 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1692 spin_unlock(&qp->s_lock);
1693 spin_unlock(&qp->s_hlock);
1694 spin_unlock_irq(&qp->r_lock);
1697 rvt_insert_qp(rdi, qp);
1700 ev.device = qp->ibqp.device;
1701 ev.element.qp = &qp->ibqp;
1703 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1706 ev.device = qp->ibqp.device;
1707 ev.element.qp = &qp->ibqp;
1709 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1714 spin_unlock(&qp->s_lock);
1715 spin_unlock(&qp->s_hlock);
1716 spin_unlock_irq(&qp->r_lock);
1731 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1734 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1736 wait_event(qp->wait, !atomic_read(&qp->refcount));
1738 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1742 if (qp->ibqp.qp_type == IB_QPT_RC) {
1748 if (qp->ip)
1749 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1750 kvfree(qp->r_rq.kwq);
1751 rdi->driver_f.qp_priv_free(rdi, qp);
1752 kfree(qp->s_ack_queue);
1753 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1754 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1755 free_ud_wq_attr(qp);
1756 vfree(qp->s_wq);
1757 kfree(qp);
1763 * @ibqp: IB qp to query
1773 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1776 attr->qp_state = qp->state;
1778 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1779 attr->path_mig_state = qp->s_mig_state;
1780 attr->qkey = qp->qkey;
1781 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1782 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1783 attr->dest_qp_num = qp->remote_qpn;
1784 attr->qp_access_flags = qp->qp_access_flags;
1785 attr->cap.max_send_wr = qp->s_size - 1 -
1787 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1788 attr->cap.max_send_sge = qp->s_max_sge;
1789 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1791 attr->ah_attr = qp->remote_ah_attr;
1792 attr->alt_ah_attr = qp->alt_ah_attr;
1793 attr->pkey_index = qp->s_pkey_index;
1794 attr->alt_pkey_index = qp->s_alt_pkey_index;
1796 attr->sq_draining = qp->s_draining;
1797 attr->max_rd_atomic = qp->s_max_rd_atomic;
1798 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1799 attr->min_rnr_timer = qp->r_min_rnr_timer;
1800 attr->port_num = qp->port_num;
1801 attr->timeout = qp->timeout;
1802 attr->retry_cnt = qp->s_retry_cnt;
1803 attr->rnr_retry = qp->s_rnr_retry_cnt;
1805 rdma_ah_get_port_num(&qp->alt_ah_attr);
1806 attr->alt_timeout = qp->alt_timeout;
1808 init_attr->event_handler = qp->ibqp.event_handler;
1809 init_attr->qp_context = qp->ibqp.qp_context;
1810 init_attr->send_cq = qp->ibqp.send_cq;
1811 init_attr->recv_cq = qp->ibqp.recv_cq;
1812 init_attr->srq = qp->ibqp.srq;
1814 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1818 init_attr->qp_type = qp->ibqp.qp_type;
1819 init_attr->port_num = qp->port_num;
1836 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1837 struct rvt_krwq *wq = qp->r_rq.kwq;
1839 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1840 !qp->ibqp.srq;
1843 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1853 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1858 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1860 if (next >= qp->r_rq.size)
1863 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1871 wc.qp = &qp->ibqp;
1875 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1877 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1891 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1898 * @qp: the qp
1915 struct rvt_qp *qp,
1923 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1926 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1934 !qp->s_max_rd_atomic)
1938 if (qp->ibqp.qp_type != IB_QPT_UC &&
1939 qp->ibqp.qp_type != IB_QPT_RC) {
1940 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1949 * @qp: the qp
1954 * qp variable is uncontrolled.
1956 * For non reserved operations, the qp->s_avail
1962 struct rvt_qp *qp,
1974 reserved_used = atomic_read(&qp->s_reserved_used);
1980 if (likely(qp->s_avail))
1983 slast = smp_load_acquire(&qp->s_last);
1984 if (qp->s_head >= slast)
1985 avail = qp->s_size - (qp->s_head - slast);
1987 avail = slast - qp->s_head;
1989 reserved_used = atomic_read(&qp->s_reserved_used);
1995 qp->s_avail = avail;
1996 if (WARN_ON(qp->s_avail >
1997 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
2000 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
2001 qp->s_head, qp->s_tail, qp->s_cur,
2002 qp->s_acked, qp->s_last);
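
The availability calculation at lines 1983-1995 is circular-buffer arithmetic: with s_size slots, the space between the producer index s_head and the consumer snapshot slast is s_size - (s_head - slast) when the head is at or past the snapshot, and slast - s_head otherwise; reserved operations are then subtracted from that. A small sketch of the ring arithmetic (function and variable names are illustrative):

    #include <stdio.h>

    /* Free slots in a ring of 'size' entries given the producer index 'head'
     * and a consumer snapshot 'last', mirroring the s_head/s_last computation. */
    static unsigned int ring_avail(unsigned int head, unsigned int last,
                                   unsigned int size)
    {
        if (head >= last)
            return size - (head - last);
        return last - head;
    }

    int main(void)
    {
        printf("%u\n", ring_avail(10, 4, 32));  /* producer ahead: 26 free */
        printf("%u\n", ring_avail(2, 30, 32));  /* wrapped case:   28 free */
        return 0;
    }
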
2008 * @qp: the QP to post on
2011 static int rvt_post_one_wr(struct rvt_qp *qp,
2022 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2032 if (unlikely(wr->num_sge > qp->s_max_sge))
2035 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2054 ret = rvt_fast_reg_mr(qp,
2063 atomic_read(&qp->local_ops_pending)) {
2067 qp, wr->ex.invalidate_rkey);
2080 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2083 next = qp->s_head + 1;
2084 if (next >= qp->s_size)
2088 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2089 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2123 log_pmtu = qp->log_pmtu;
2124 if (qp->allowed_ops == IB_OPCODE_UD) {
2133 atomic_inc(&qp->local_ops_pending);
2140 wqe->ssn = qp->s_ssn++;
2141 wqe->psn = qp->s_next_psn;
2150 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2156 qp->s_next_psn = wqe->lpsn + 1;
2160 rvt_qp_wqe_reserve(qp, wqe);
2163 qp->s_avail--;
2165 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2167 qp->s_head = next;
2172 if (qp->allowed_ops == IB_OPCODE_UD)
2197 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2204 spin_lock_irqsave(&qp->s_hlock, flags);
2210 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2211 spin_unlock_irqrestore(&qp->s_hlock, flags);
2220 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2223 err = rvt_post_one_wr(qp, wr, &call_send);
2231 spin_unlock_irqrestore(&qp->s_hlock, flags);
2238 rdi->driver_f.do_send(qp);
2240 rdi->driver_f.schedule_send_no_lock(qp);
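
Line 2220 decides whether the post path may call the driver's send routine directly: only when the send queue was idle (s_head equal to the latest s_last) and the caller posted a single work request; anything else is deferred to schedule_send_no_lock() at line 2240. A tiny sketch of that decision (names are illustrative):

    #include <stdio.h>

    /* Send inline only when the queue was empty and exactly one WR was given;
     * otherwise hand the queue to the driver's send scheduler. */
    static int should_call_send(unsigned int s_head, unsigned int s_last,
                                int wr_has_next)
    {
        return s_head == s_last && !wr_has_next;
    }

    int main(void)
    {
        printf("%d\n", should_call_send(3, 3, 0));  /* idle queue, one WR: 1 */
        printf("%d\n", should_call_send(3, 1, 0));  /* queue busy:         0 */
        return 0;
    }
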
2318 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2325 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2328 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2329 ss = &qp->r_sge;
2330 ss->sg_list = qp->r_sg_list;
2331 qp->r_len = 0;
2341 qp->r_len += wqe->sg_list[i].length;
2345 ss->total_len = qp->r_len;
2359 wc.qp = &qp->ibqp;
2361 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2386 * @qp: the QP
2387 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2394 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2408 if (qp->ibqp.srq) {
2409 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2416 rq = &qp->r_rq;
2417 ip = qp->ip;
2421 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2459 if (!wr_id_only && !init_sge(qp, wqe)) {
2463 qp->r_wr_id = wqe->wr_id;
2467 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2482 ev.device = qp->ibqp.device;
2483 ev.element.srq = qp->ibqp.srq;
2499 * @qp: the QP
2501 void rvt_comm_est(struct rvt_qp *qp)
2503 qp->r_flags |= RVT_R_COMM_EST;
2504 if (qp->ibqp.event_handler) {
2507 ev.device = qp->ibqp.device;
2508 ev.element.qp = &qp->ibqp;
2510 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2515 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2520 spin_lock_irqsave(&qp->s_lock, flags);
2521 lastwqe = rvt_error_qp(qp, err);
2522 spin_unlock_irqrestore(&qp->s_lock, flags);
2527 ev.device = qp->ibqp.device;
2528 ev.element.qp = &qp->ibqp;
2530 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2554 * @qp: the QP
2558 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2560 struct ib_qp *ibqp = &qp->ibqp;
2563 lockdep_assert_held(&qp->s_lock);
2564 qp->s_flags |= RVT_S_TIMER;
2565 /* 4.096 usec. * (1 << qp->timeout) */
2566 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2567 (qp->timeout_jiffies << shift);
2568 add_timer(&qp->s_timer);
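
Per the comment at line 2565, the retry timer follows the IB local-ACK-timeout formula 4.096 usec * 2^timeout, optionally scaled further by the shift argument. For timeout = 14 and no extra shift that is 4.096 usec * 16384, roughly 67 ms. A worked sketch of the arithmetic (illustrative only; the driver itself precomputes qp->timeout_jiffies):

    #include <stdio.h>

    /* IB local ACK timeout: 4.096 usec * 2^timeout (4096 ns << timeout),
     * with an optional extra left shift as in rvt_add_retry_timer_ext(). */
    static unsigned long long timeout_usecs(unsigned int timeout, unsigned int shift)
    {
        return (4096ULL << timeout << shift) / 1000ULL;
    }

    int main(void)
    {
        printf("%llu usec\n", timeout_usecs(14, 0));  /* ~67108 usec (~67 ms) */
        return 0;
    }
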
2574 * @qp: the QP
2577 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2581 lockdep_assert_held(&qp->s_lock);
2582 qp->s_flags |= RVT_S_WAIT_RNR;
2584 trace_rvt_rnrnak_add(qp, to);
2585 hrtimer_start(&qp->s_rnr_timer,
2592 * @qp: the QP
2595 void rvt_stop_rc_timers(struct rvt_qp *qp)
2597 lockdep_assert_held(&qp->s_lock);
2599 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2600 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2601 del_timer(&qp->s_timer);
2602 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2609 * @qp: the QP
2614 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2616 lockdep_assert_held(&qp->s_lock);
2618 if (qp->s_flags & RVT_S_WAIT_RNR) {
2619 qp->s_flags &= ~RVT_S_WAIT_RNR;
2620 trace_rvt_rnrnak_stop(qp, 0);
2626 * @qp: the QP
2628 void rvt_del_timers_sync(struct rvt_qp *qp)
2630 del_timer_sync(&qp->s_timer);
2631 hrtimer_cancel(&qp->s_rnr_timer);
2640 struct rvt_qp *qp = from_timer(qp, t, s_timer);
2641 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2644 spin_lock_irqsave(&qp->r_lock, flags);
2645 spin_lock(&qp->s_lock);
2646 if (qp->s_flags & RVT_S_TIMER) {
2647 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2649 qp->s_flags &= ~RVT_S_TIMER;
2651 del_timer(&qp->s_timer);
2652 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2654 rdi->driver_f.notify_restart_rc(qp,
2655 qp->s_last_psn + 1,
2657 rdi->driver_f.schedule_send(qp);
2659 spin_unlock(&qp->s_lock);
2660 spin_unlock_irqrestore(&qp->r_lock, flags);
2668 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2669 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2672 spin_lock_irqsave(&qp->s_lock, flags);
2673 rvt_stop_rnr_timer(qp);
2674 trace_rvt_rnrnak_timeout(qp, 0);
2675 rdi->driver_f.schedule_send(qp);
2676 spin_unlock_irqrestore(&qp->s_lock, flags);
2702 void (*cb)(struct rvt_qp *qp, u64 v))
2727 * Updates iter->qp with the current QP when the return
2730 * Return: 0 - iter->qp is valid 1 - no more QPs
2737 struct rvt_qp *pqp = iter->qp;
2738 struct rvt_qp *qp;
2744 * real hash table. Since the qp code sets
2745 * the qp->next hash link to NULL, this works just fine.
2749 * n = 0..iter->specials is the special qp indices
2757 qp = rcu_dereference(pqp->next);
2765 qp = rcu_dereference(rvp->qp[n & 1]);
2767 qp = rcu_dereference(
2772 pqp = qp;
2773 if (qp) {
2774 iter->qp = qp;
2801 void (*cb)(struct rvt_qp *qp, u64 v))
2815 rvt_get_qp(i.qp);
2817 i.cb(i.qp, i.v);
2819 rvt_put_qp(i.qp);
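
rvt_qp_iter() walks every QP on the device, takes a reference around each visit, and invokes the callback as cb(qp, v) (lines 2815-2819). A hedged usage sketch, assuming the rvt_qp_iter(rdi, v, cb) signature implied by the prototypes above and the usual rdmavt headers; the callback body and the counter-through-u64 convention are purely illustrative:

    #include <rdma/rdma_vt.h>
    #include <rdma/rdmavt_qp.h>

    /* Illustrative callback: count QPs that are not in the RESET state.
     * The u64 value is treated here as a pointer to a caller-owned counter. */
    static void count_active_qp(struct rvt_qp *qp, u64 v)
    {
        if (qp->state != IB_QPS_RESET)
            (*(u64 *)(uintptr_t)v)++;
    }

    /* Caller sketch: iterate every QP on the device and tally the active ones. */
    static u64 count_active(struct rvt_dev_info *rdi)
    {
        u64 n = 0;

        rvt_qp_iter(rdi, (u64)(uintptr_t)&n, count_active_qp);
        return n;
    }
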
2829 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2835 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2837 rdi = ib_to_rvt(qp->ibqp.device);
2839 old_last = qp->s_last;
2840 trace_rvt_qp_send_completion(qp, wqe, old_last);
2841 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2843 if (qp->s_acked == old_last)
2844 qp->s_acked = last;
2845 if (qp->s_cur == old_last)
2846 qp->s_cur = last;
2847 if (qp->s_tail == old_last)
2848 qp->s_tail = last;
2849 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2850 qp->s_draining = 0;
2856 * @qp: associated QP
2863 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2871 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2957 struct rvt_qp *qp;
2978 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
3015 if (!qp) {
3019 spin_lock_irqsave(&qp->r_lock, flags);
3020 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3021 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3050 ret = rvt_get_rwqe(qp, false);
3055 if (wqe->length > qp->r_len)
3059 if (!rvt_invalidate_rkey(qp,
3076 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3080 ret = rvt_get_rwqe(qp, true);
3088 copy_last = rvt_is_user_qp(qp);
3089 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3094 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3099 qp->r_sge.sg_list = NULL;
3100 qp->r_sge.num_sge = 1;
3101 qp->r_sge.total_len = wqe->length;
3105 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3107 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3115 qp->r_sge.sge = wqe->sg_list[0];
3116 qp->r_sge.sg_list = wqe->sg_list + 1;
3117 qp->r_sge.num_sge = wqe->wr.num_sge;
3118 qp->r_sge.total_len = wqe->length;
3123 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3127 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3133 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3138 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3140 rvt_put_mr(qp->r_sge.sge.mr);
3141 qp->r_sge.num_sge = 0;
3154 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3160 rvt_put_ss(&qp->r_sge);
3162 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3169 wc.wr_id = qp->r_wr_id;
3172 wc.qp = &qp->ibqp;
3173 wc.src_qp = qp->remote_qpn;
3174 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3175 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3178 rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3181 spin_unlock_irqrestore(&qp->r_lock, flags);
3197 if (qp->ibqp.qp_type == IB_QPT_UC)
3210 spin_unlock_irqrestore(&qp->r_lock, flags);
3214 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3236 rvt_rc_error(qp, wc.status);
3239 spin_unlock_irqrestore(&qp->r_lock, flags);
3258 ev.element.qp = &sqp->ibqp;