Lines Matching refs:rdi
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
103 void rvt_wss_exit(struct rvt_dev_info *rdi)
105 struct rvt_wss *wss = rdi->wss;
113 kfree(rdi->wss);
114 rdi->wss = NULL;
122 int rvt_wss_init(struct rvt_dev_info *rdi)
124 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
125 unsigned int wss_threshold = rdi->dparms.wss_threshold;
126 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
132 int node = rdi->dparms.node;
135 rdi->wss = NULL;
139 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
140 if (!rdi->wss)
142 wss = rdi->wss;
179 rvt_wss_exit(rdi);
284 * @rdi: rvt dev struct
287 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
293 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
298 qpt->last = rdi->dparms.qpn_start;
299 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
309 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
312 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
317 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
318 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
319 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
353 * @rdi: rvt dev structure
357 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
362 if (!rdi->dparms.qp_table_size)
369 if (!rdi->driver_f.free_all_qps ||
370 !rdi->driver_f.qp_priv_alloc ||
371 !rdi->driver_f.qp_priv_free ||
372 !rdi->driver_f.notify_qp_reset ||
373 !rdi->driver_f.notify_restart_rc)
377 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
378 rdi->dparms.node);
379 if (!rdi->qp_dev)
383 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
384 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
385 rdi->qp_dev->qp_table =
386 kmalloc_array_node(rdi->qp_dev->qp_table_size,
387 sizeof(*rdi->qp_dev->qp_table),
388 GFP_KERNEL, rdi->dparms.node);
389 if (!rdi->qp_dev->qp_table)
392 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
393 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
395 spin_lock_init(&rdi->qp_dev->qpt_lock);
398 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
401 spin_lock_init(&rdi->n_qps_lock);
406 kfree(rdi->qp_dev->qp_table);
407 free_qpn_table(&rdi->qp_dev->qpn_table);
410 kfree(rdi->qp_dev);
426 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
429 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
437 * @rdi: rvt device info structure
443 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
447 qp_inuse += rvt_mcast_tree_empty(rdi);
449 rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
456 * @rdi: rvt dev structure
460 void rvt_qp_exit(struct rvt_dev_info *rdi)
462 u32 qps_inuse = rvt_free_all_qps(rdi);
465 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
468 kfree(rdi->qp_dev->qp_table);
469 free_qpn_table(&rdi->qp_dev->qpn_table);
470 kfree(rdi->qp_dev);
482 * @rdi: rvt device info structure
490 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
499 if (rdi->driver_f.alloc_qpn)
500 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
563 WARN_ON(rdi->dparms.qos_shift > 1 &&
564 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
582 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
604 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
666 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
668 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
723 * @rdi: rvt dev struct
729 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
731 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
732 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
736 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
739 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
742 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
749 qpp = &rdi->qp_dev->qp_table[n];
751 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
756 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
764 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
814 * @rdi: rvt dev struct
823 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
869 * @rdi: rvt dev struct
875 static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
888 rdi->driver_f.flush_qp_waiters(qp);
896 rdi->driver_f.stop_send_queue(qp);
899 rdi->driver_f.quiesce_qp(qp);
902 rvt_remove_qp(rdi, qp);
914 rdi->driver_f.notify_qp_reset(qp);
916 rvt_init_qp(rdi, qp, type);
924 * @rdi: the device info
931 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
937 _rvt_reset_qp(rdi, qp, type);
1037 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1042 if (!rdi)
1048 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
1049 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
1055 rdi->dparms.props.max_recv_sge ||
1056 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
1067 rdi->dparms.reserved_operations;
1079 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1093 kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
1101 kcalloc_node(rvt_max_atomic(rdi),
1104 rdi->dparms.node);
1118 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1135 rdi->dparms.node, udata);
1158 ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
1165 ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1176 rvt_init_qp(rdi, qp, init_attr->qp_type);
1177 if (rdi->driver_f.qp_priv_init) {
1178 ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1206 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1221 spin_lock(&rdi->n_qps_lock);
1222 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1223 spin_unlock(&rdi->n_qps_lock);
1228 rdi->n_qps_allocated++;
1239 rdi->n_rc_qps++;
1240 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1242 spin_unlock(&rdi->n_qps_lock);
1245 spin_lock_irq(&rdi->pending_lock);
1246 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1247 spin_unlock_irq(&rdi->pending_lock);
1257 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1266 rdi->driver_f.qp_priv_free(rdi, qp);
1290 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1307 rdi->driver_f.notify_error_qp(qp);
1311 rdi->driver_f.schedule_send(qp);
1372 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1374 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1378 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1383 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1385 qp->next = rdi->qp_dev->qp_table[n];
1386 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1390 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1405 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1430 if (rdi->driver_f.check_modify_qp &&
1431 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1462 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1467 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1502 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1526 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1532 _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1571 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1579 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1605 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1636 if (rdi->driver_f.modify_qp)
1637 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1644 rvt_insert_qp(rdi, qp);
1680 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1682 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1686 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1688 spin_lock(&rdi->n_qps_lock);
1689 rdi->n_qps_allocated--;
1691 rdi->n_rc_qps--;
1692 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1694 spin_unlock(&rdi->n_qps_lock);
1699 rdi->driver_f.qp_priv_free(rdi, qp);
1722 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1726 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1729 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1730 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1734 rdi->dparms.reserved_operations;
1898 * @rdi: the rdmavt device
1911 struct rvt_dev_info *rdi,
1923 if (reserved_used >= rdi->dparms.reserved_operations)
1939 (rdi->dparms.reserved_operations - reserved_used);
1945 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1946 rvt_pr_err(rdi,
1971 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1984 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2000 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2026 reserved_op = rdi->post_parms[wr->opcode].flags &
2029 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2036 rkt = &rdi->lkey_table;
2080 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2098 if (rdi->driver_f.setup_wqe) {
2099 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2104 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2147 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2187 rdi->driver_f.do_send(qp);
2189 rdi->driver_f.schedule_send_no_lock(qp);
2274 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2276 rkt = &rdi->lkey_table;
2510 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2515 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2590 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2596 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2602 if (rdi->driver_f.notify_restart_rc)
2603 rdi->driver_f.notify_restart_rc(qp,
2606 rdi->driver_f.schedule_send(qp);
2618 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2624 rdi->driver_f.schedule_send(qp);
2632 * @rdi: rvt devinfo
2649 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2659 i->rdi = rdi;
2661 i->specials = rdi->ibdev.phys_port_cnt * 2;
2688 struct rvt_dev_info *rdi = iter->rdi;
2700 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2704 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2712 pidx = n % rdi->ibdev.phys_port_cnt;
2713 rvp = rdi->ports[pidx];
2717 rdi->qp_dev->qp_table[
2734 * @rdi: rvt devinfo
2748 void rvt_qp_iter(struct rvt_dev_info *rdi,
2754 .rdi = rdi,
2755 .specials = rdi->ibdev.phys_port_cnt * 2,
2782 struct rvt_dev_info *rdi;
2786 rdi = ib_to_rvt(qp->ibqp.device);
2790 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2820 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2821 struct rvt_wss *wss = rdi->wss;
2822 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2905 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2920 rvp = rdi->ports[sqp->port_num - 1];
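
The matches above repeat one pattern: recover the rdmavt device info from an ib_qp with ib_to_rvt(), then consult rdi->dparms (per-device parameters) or call back through rdi->driver_f. The following is a minimal sketch of that pattern, assuming kernel context with <rdma/rdma_vt.h> available; the helper name check_cap_limits() is hypothetical and only mirrors the capability checks seen around source lines 1048-1056.

	#include <rdma/ib_verbs.h>
	#include <rdma/rdma_vt.h>

	/*
	 * Hypothetical helper, not part of rdmavt: illustrates recovering
	 * rdi from an ib_qp and validating requested capabilities against
	 * the device limits stored in rdi->dparms.props.
	 */
	static int check_cap_limits(struct ib_qp *ibqp,
				    struct ib_qp_init_attr *init_attr)
	{
		/* container_of() back from the ib device to the rdmavt device */
		struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

		/* reject requests that exceed the advertised device limits */
		if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
		    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
			return -EINVAL;

		return 0;
	}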