Lines Matching defs:rxe (excerpts from rxe_mcast.c, the multicast support code of the rxe soft-RoCE driver)
19 * mcast packets in the rxe receive path.
22 #include "rxe.h"
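
The line above is the tail of the file's overview comment; together with the fragments below it points at two objects: struct rxe_mcg, one per multicast GID, indexed by a red-black tree (rxe->mcg_tree), and struct rxe_mca, one per attached QP, linked on the group's qp_list so received multicast packets can be replicated to each attached QP. A minimal sketch of the two structures, with field names inferred from how the excerpted lines use them rather than taken from the driver's headers; the sketches further down likewise assume the driver's usual types come in through "rxe.h":

struct rxe_mcg {
        struct rb_node          node;           /* linkage in rxe->mcg_tree */
        struct kref             ref_cnt;        /* lookups return a referenced mcg */
        struct rxe_dev          *rxe;           /* owning rxe device */
        struct list_head        qp_list;        /* list of attached rxe_mca */
        union ib_gid            mgid;           /* multicast GID, the tree key */
        atomic_t                qp_num;         /* number of attached QPs */
};

struct rxe_mca {
        struct list_head        qp_list;        /* entry on mcg->qp_list */
        struct rxe_qp           *qp;            /* the attached QP */
};
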
25 * rxe_mcast_add - add multicast address to rxe device
26 * @rxe: rxe device object
31 static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
37 return dev_mc_add(rxe->ndev, ll_addr);
41 * rxe_mcast_del - delete multicast address from rxe device
42 * @rxe: rxe device object
47 static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
53 return dev_mc_del(rxe->ndev, ll_addr);
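
These two helpers reduce to mapping the 128-bit MGID onto an Ethernet multicast MAC and programming it into the underlying net_device. A sketch consistent with the lines above, using the ipv6_eth_mc_map() helper that produces the 33:33:xx:xx:xx:xx mapping (treat the bodies as illustrative, not as the exact upstream source):

static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
        unsigned char ll_addr[ETH_ALEN];

        /* map the MGID onto an Ethernet multicast MAC address */
        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

        return dev_mc_add(rxe->ndev, ll_addr);
}

static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
        unsigned char ll_addr[ETH_ALEN];

        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

        return dev_mc_del(rxe->ndev, ll_addr);
}
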
57 * __rxe_insert_mcg - insert an mcg into red-black tree (rxe->mcg_tree)
60 * Context: caller must hold a reference to mcg and rxe->mcg_lock and
65 struct rb_root *tree = &mcg->rxe->mcg_tree;
90 * Context: caller must hold a reference to mcg and rxe->mcg_lock
94 rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
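
Insertion into rxe->mcg_tree is the standard kernel rbtree walk, keyed by memcmp() over the MGID, followed by rb_link_node() and rb_insert_color(); removal is a plain rb_erase(), as the line above shows. A sketch of the insert/remove pair (the comparison direction is an assumption, and per the comment above the caller is responsible for never inserting the same mgid twice):

static void __rxe_insert_mcg(struct rxe_mcg *mcg)
{
        struct rb_root *tree = &mcg->rxe->mcg_tree;
        struct rb_node **link = &tree->rb_node;
        struct rb_node *node = NULL;
        struct rxe_mcg *tmp;
        int cmp;

        /* walk down to the leaf position for the new mgid */
        while (*link) {
                node = *link;
                tmp = rb_entry(node, struct rxe_mcg, node);

                cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid));
                if (cmp > 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&mcg->node, node, link);
        rb_insert_color(&mcg->node, tree);
}

static void __rxe_remove_mcg(struct rxe_mcg *mcg)
{
        rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
}
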
98 * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock
99 * @rxe: rxe device object
102 * Context: caller must hold rxe->mcg_lock
105 static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
108 struct rb_root *tree = &rxe->mcg_tree;
138 * @rxe: rxe device object
143 struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
147 spin_lock_bh(&rxe->mcg_lock);
148 mcg = __rxe_lookup_mcg(rxe, mgid);
149 spin_unlock_bh(&rxe->mcg_lock);
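
Lookup mirrors the insert: walk the tree comparing MGIDs and, on a hit, take a reference before returning so the group cannot disappear under the caller. Only the locked wrapper appears in the fragments above; a sketch of both halves (ref_cnt is the assumed kref field from the struct sketch):

static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
                                        union ib_gid *mgid)
{
        struct rb_root *tree = &rxe->mcg_tree;
        struct rb_node *node = tree->rb_node;
        struct rxe_mcg *mcg;
        int cmp;

        while (node) {
                mcg = rb_entry(node, struct rxe_mcg, node);

                cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid));
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else
                        break;
        }

        if (!node)
                return NULL;

        /* return a referenced pointer; the caller drops it when done */
        kref_get(&mcg->ref_cnt);
        return mcg;
}

struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
        struct rxe_mcg *mcg;

        spin_lock_bh(&rxe->mcg_lock);
        mcg = __rxe_lookup_mcg(rxe, mgid);
        spin_unlock_bh(&rxe->mcg_lock);

        return mcg;
}
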
156 * @rxe: rxe device
160 * Context: caller should hold rxe->mcg_lock
162 static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
168 mcg->rxe = rxe;
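
Group initialization runs with rxe->mcg_lock held: fill in the new mcg, take an extra reference on behalf of the red-black tree (which keeps its own pointer to the object), and insert it. A sketch consistent with the fragment above:

static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
                           struct rxe_mcg *mcg)
{
        kref_init(&mcg->ref_cnt);
        memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
        INIT_LIST_HEAD(&mcg->qp_list);
        mcg->rxe = rxe;

        /* the tree stores its own pointer, so it gets its own reference */
        kref_get(&mcg->ref_cnt);
        __rxe_insert_mcg(mcg);
}
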
183 * @rxe: rxe device object
188 static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
193 if (rxe->attr.max_mcast_grp == 0)
197 mcg = rxe_lookup_mcg(rxe, mgid);
202 if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
214 spin_lock_bh(&rxe->mcg_lock);
216 tmp = __rxe_lookup_mcg(rxe, mgid);
218 spin_unlock_bh(&rxe->mcg_lock);
219 atomic_dec(&rxe->mcg_num);
224 __rxe_init_mcg(rxe, mgid, mcg);
225 spin_unlock_bh(&rxe->mcg_lock);
228 err = rxe_mcast_add(rxe, mgid);
234 atomic_dec(&rxe->mcg_num);
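
rxe_get_mcg() is the get-or-create path and is where the file's locking strategy is most visible: try a plain lookup first, bump rxe->mcg_num against attr.max_mcast_grp, allocate the new group with GFP_KERNEL outside the spinlock, then re-check under rxe->mcg_lock in case another thread created the same MGID in the meantime, and only then program the multicast MAC. A sketch of that flow; the error codes and the failure unwind are simplified assumptions:

static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
        struct rxe_mcg *mcg, *tmp;
        int err;

        if (rxe->attr.max_mcast_grp == 0)
                return ERR_PTR(-EINVAL);

        /* fast path: the group already exists */
        mcg = rxe_lookup_mcg(rxe, mgid);
        if (mcg)
                return mcg;

        /* enforce the device limit on multicast groups */
        if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
                err = -ENOMEM;
                goto err_dec;
        }

        /* speculative alloc outside the lock so GFP_KERNEL is allowed */
        mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
        if (!mcg) {
                err = -ENOMEM;
                goto err_dec;
        }

        spin_lock_bh(&rxe->mcg_lock);
        /* re-check: someone may have added the same mgid while unlocked */
        tmp = __rxe_lookup_mcg(rxe, mgid);
        if (tmp) {
                spin_unlock_bh(&rxe->mcg_lock);
                atomic_dec(&rxe->mcg_num);
                kfree(mcg);
                return tmp;
        }

        __rxe_init_mcg(rxe, mgid, mcg);
        spin_unlock_bh(&rxe->mcg_lock);

        /* program the multicast MAC outside the lock */
        err = rxe_mcast_add(rxe, mgid);
        if (!err)
                return mcg;

        /* simplified unwind: pull the group back out of the tree */
        spin_lock_bh(&rxe->mcg_lock);
        __rxe_remove_mcg(mcg);
        spin_unlock_bh(&rxe->mcg_lock);
        kfree(mcg);
err_dec:
        atomic_dec(&rxe->mcg_num);
        return ERR_PTR(err);
}
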
250 * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
253 * Context: caller is holding rxe->mcg_lock
258 struct rxe_dev *rxe = mcg->rxe;
264 atomic_dec(&rxe->mcg_num);
276 rxe_mcast_del(mcg->rxe, &mcg->mgid);
278 spin_lock_bh(&mcg->rxe->mcg_lock);
280 spin_unlock_bh(&mcg->rxe->mcg_lock);
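
Teardown is split the same way as creation: the locked helper reverses __rxe_init_mcg (remove from the tree, drop the tree's reference, decrement rxe->mcg_num), while the outer wrapper first removes the multicast MAC outside the lock and then takes rxe->mcg_lock. A sketch; the kref release callback is a hypothetical name introduced here:

/* hypothetical kref release: frees the mcg once the last reference drops */
static void rxe_cleanup_mcg(struct kref *kref)
{
        struct rxe_mcg *mcg = container_of(kref, struct rxe_mcg, ref_cnt);

        kfree(mcg);
}

static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
{
        struct rxe_dev *rxe = mcg->rxe;

        /* remove from the tree and drop the tree's reference */
        __rxe_remove_mcg(mcg);
        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

        atomic_dec(&rxe->mcg_num);
}

static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
        /* unprogram the multicast MAC outside of the lock */
        rxe_mcast_del(mcg->rxe, &mcg->mgid);

        spin_lock_bh(&mcg->rxe->mcg_lock);
        __rxe_destroy_mcg(mcg);
        spin_unlock_bh(&mcg->rxe->mcg_lock);
}
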
289 * Context: caller must hold references on qp and mcg, rxe->mcg_lock
297 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
300 n = atomic_inc_return(&rxe->mcg_attach);
301 if (n > rxe->attr.max_total_mcast_qp_attach) {
302 atomic_dec(&rxe->mcg_attach);
307 if (n > rxe->attr.max_mcast_qp_attach) {
309 atomic_dec(&rxe->mcg_attach);
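
Attaching a QP is guarded by two device limits: a global cap, attr.max_total_mcast_qp_attach, tracked in rxe->mcg_attach, and a per-group cap, attr.max_mcast_qp_attach. Each counter is bumped with atomic_inc_return() and rolled back immediately if the limit is exceeded, as the fragments above show. A sketch of the helper; the per-group counter name and the list bookkeeping follow the struct sketch rather than the listing:

static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
                          struct rxe_mca *mca)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int n;

        /* global limit across all groups */
        n = atomic_inc_return(&rxe->mcg_attach);
        if (n > rxe->attr.max_total_mcast_qp_attach) {
                atomic_dec(&rxe->mcg_attach);
                return -ENOMEM;
        }

        /* per-group limit */
        n = atomic_inc_return(&mcg->qp_num);
        if (n > rxe->attr.max_mcast_qp_attach) {
                atomic_dec(&mcg->qp_num);
                atomic_dec(&rxe->mcg_attach);
                return -ENOMEM;
        }

        /* record the attachment on the group's qp_list */
        mca->qp = qp;
        list_add_tail(&mca->qp_list, &mcg->qp_list);

        return 0;
}
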
333 struct rxe_dev *rxe = mcg->rxe;
338 spin_lock_bh(&rxe->mcg_lock);
341 spin_unlock_bh(&rxe->mcg_lock);
345 spin_unlock_bh(&rxe->mcg_lock);
352 spin_lock_bh(&rxe->mcg_lock);
366 spin_unlock_bh(&rxe->mcg_lock);
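
rxe_attach_mcg() repeats the check/alloc/re-check idiom at the mca level: scan the group's qp_list under the lock to see whether the QP is already attached, drop the lock to kzalloc() a new mca with GFP_KERNEL, then re-scan under the lock before initializing it. A sketch matching the lock/unlock pattern above:

static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
        struct rxe_dev *rxe = mcg->rxe;
        struct rxe_mca *mca, *tmp;
        int err;

        /* already attached? then there is nothing to do */
        spin_lock_bh(&rxe->mcg_lock);
        list_for_each_entry(mca, &mcg->qp_list, qp_list) {
                if (mca->qp == qp) {
                        spin_unlock_bh(&rxe->mcg_lock);
                        return 0;
                }
        }
        spin_unlock_bh(&rxe->mcg_lock);

        /* speculative alloc so we can use GFP_KERNEL */
        mca = kzalloc(sizeof(*mca), GFP_KERNEL);
        if (!mca)
                return -ENOMEM;

        spin_lock_bh(&rxe->mcg_lock);
        /* re-check: the qp may have been attached while the lock was dropped */
        list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
                if (tmp->qp == qp) {
                        kfree(mca);
                        err = 0;
                        goto out;
                }
        }

        err = __rxe_init_mca(qp, mcg, mca);
        if (err)
                kfree(mca);
out:
        spin_unlock_bh(&rxe->mcg_lock);
        return err;
}
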
375 * Context: caller must hold a reference to mcg and rxe->mcg_lock
382 atomic_dec(&mcg->rxe->mcg_attach);
398 struct rxe_dev *rxe = mcg->rxe;
401 spin_lock_bh(&rxe->mcg_lock);
415 spin_unlock_bh(&rxe->mcg_lock);
421 spin_unlock_bh(&rxe->mcg_lock);
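
Detach walks the qp_list under rxe->mcg_lock, unlinks and frees the matching mca while reversing the attach-time counters, and tears the whole group down if that was the last attached QP; if the QP is not found the call fails. A sketch; the error code and the last-QP test are assumptions consistent with the counters used elsewhere:

static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg)
{
        list_del(&mca->qp_list);

        /* reverse the bookkeeping done in __rxe_init_mca */
        atomic_dec(&mcg->qp_num);
        atomic_dec(&mcg->rxe->mcg_attach);

        kfree(mca);
}

static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
        struct rxe_dev *rxe = mcg->rxe;
        struct rxe_mca *mca, *tmp;

        spin_lock_bh(&rxe->mcg_lock);
        list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
                if (mca->qp != qp)
                        continue;

                __rxe_cleanup_mca(mca, mcg);

                /* last attachment gone: tear down the group as well */
                if (atomic_read(&mcg->qp_num) <= 0)
                        __rxe_destroy_mcg(mcg);

                spin_unlock_bh(&rxe->mcg_lock);
                return 0;
        }
        spin_unlock_bh(&rxe->mcg_lock);

        /* the qp was never attached to this group */
        return -EINVAL;
}
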
436 struct rxe_dev *rxe = to_rdev(ibqp->device);
441 mcg = rxe_get_mcg(rxe, mgid);
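
The attach verbs entry point (presumably rxe_attach_mcast(); only two of its body lines appear above) ties the pieces together: get or create the group, attach the QP, destroy the group again if nothing ended up attached, and drop the lookup reference. A sketch under those assumptions, reusing the hypothetical rxe_cleanup_mcg release from the teardown sketch:

int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mcg *mcg;
        int err;

        /* returns a referenced mcg, creating it if needed */
        mcg = rxe_get_mcg(rxe, mgid);
        if (IS_ERR(mcg))
                return PTR_ERR(mcg);

        err = rxe_attach_mcg(mcg, qp);

        /* if no qp ended up attached, do not keep an empty group around */
        if (atomic_read(&mcg->qp_num) == 0)
                rxe_destroy_mcg(mcg);

        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
        return err;
}
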
466 struct rxe_dev *rxe = to_rdev(ibqp->device);
471 mcg = rxe_lookup_mcg(rxe, mgid);
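
The matching detach entry point is simpler: look up the group, failing if it does not exist, detach the QP, and drop the lookup reference. A sketch under the same assumptions:

int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mcg *mcg;
        int err;

        mcg = rxe_lookup_mcg(rxe, mgid);
        if (!mcg)
                return -EINVAL;

        err = rxe_detach_mcg(mcg, qp);
        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

        return err;
}
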