Lines matching references to mw in drivers/infiniband/sw/rxe/rxe_mw.c (the numbers below are that file's line numbers; only matching lines are shown, so multi-line statements appear truncated)
18 struct rxe_mw *mw = to_rmw(ibmw);
25 ret = rxe_add_to_pool(&rxe->mw_pool, mw);
31 mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
32 mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
34 spin_lock_init(&mw->lock);
36 rxe_finalize(mw);
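
The allocation path above (source lines 18-36) is where the rkey layout is fixed: the pool index goes in the upper 24 bits and an 8-bit rolling key in the low byte, which is why every later lookup in this file recovers the index with rkey >> 8. A minimal userspace sketch of that packing; the index and key values are hypothetical, and make_rkey is made up for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the rkey layout set at source line 31: upper 24 bits
     * carry the pool index, the low 8 bits a rolling key so a recycled
     * index does not reproduce an old rkey.
     */
    static uint32_t make_rkey(uint32_t index, uint8_t key)
    {
        return (index << 8) | key;
    }

    int main(void)
    {
        uint32_t rkey = make_rkey(5, 0xab);     /* hypothetical values */

        printf("rkey  = %#x\n", (unsigned)rkey);            /* 0x5ab */
        printf("index = %u\n", (unsigned)(rkey >> 8));      /* 5 */
        printf("key   = %#x\n", (unsigned)(rkey & 0xff));   /* 0xab */
        return 0;
    }
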
43 struct rxe_mw *mw = to_rmw(ibmw);
45 rxe_cleanup(mw);
51 struct rxe_mw *mw, struct rxe_mr *mr, int access)
53 if (mw->ibmw.type == IB_MW_TYPE_1) {
54 if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
55 rxe_dbg_mw(mw,
62 rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
67 if (mw->ibmw.type == IB_MW_TYPE_2) {
69 if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
70 rxe_dbg_mw(mw,
76 if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
77 rxe_dbg_mw(mw,
83 if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
84 rxe_dbg_mw(mw,
95 rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
101 rxe_dbg_mw(mw,
110 rxe_dbg_mw(mw,
117 if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
118 rxe_dbg_mw(mw,
123 if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
124 ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
126 rxe_dbg_mw(mw,
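
Everything from source line 53 down is rxe_check_bind_mw() validating the bind request against the IBA rules: a type 1 window must already be valid and must not be zero based; a type 2 window must be free, must share the QP's PD, and must name a real MR with nonzero length; the MR itself must permit binding and must not be zero based; and the requested window has to fit inside the MR's registered range. A compilable sketch of that last range check (source lines 117-126); the function and parameter names are illustrative, not the kernel's:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* The window [addr, addr + length) must lie entirely inside the
     * MR's registered range [iova, iova + length).
     */
    static bool mw_range_ok(uint64_t mw_addr, uint64_t mw_len,
                            uint64_t mr_iova, uint64_t mr_len)
    {
        if (mw_len > mr_len)
            return false;
        return mw_addr >= mr_iova &&
               mw_addr + mw_len <= mr_iova + mr_len;
    }

    int main(void)
    {
        assert(mw_range_ok(0x1000, 0x100, 0x1000, 0x1000));
        assert(!mw_range_ok(0x0f00, 0x100, 0x1000, 0x1000));
        return 0;
    }
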
136 struct rxe_mw *mw, struct rxe_mr *mr, int access)
138 u32 key = wqe->wr.wr.mw.rkey & 0xff;
140 mw->rkey = (mw->rkey & ~0xff) | key;
141 mw->access = access;
142 mw->state = RXE_MW_STATE_VALID;
143 mw->addr = wqe->wr.wr.mw.addr;
144 mw->length = wqe->wr.wr.mw.length;
146 if (mw->mr) {
147 rxe_put(mw->mr);
148 atomic_dec(&mw->mr->num_mw);
149 mw->mr = NULL;
152 if (mw->length) {
153 mw->mr = mr;
158 if (mw->ibmw.type == IB_MW_TYPE_2) {
160 mw->qp = qp;
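
rxe_do_bind_mw() (source lines 138-160) commits the bind: only the low byte of the rkey changes, since the upper bits still encode the pool index and later lookups by rkey >> 8 must keep working; the old MR reference, if any, is dropped, and a type 2 window records the binding QP. A sketch of the rkey update at source lines 138 and 140, with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    /* Replace only the low 8 bits of the rkey with the consumer-
     * supplied key; the pool index in the upper 24 bits is preserved.
     */
    static uint32_t rebind_rkey(uint32_t old_rkey, uint32_t wr_rkey)
    {
        return (old_rkey & ~0xffu) | (wr_rkey & 0xff);
    }

    int main(void)
    {
        uint32_t rkey = rebind_rkey(0x5ab, 0x77);   /* hypothetical */

        assert(rkey == 0x577);      /* index 5 kept, key now 0x77 */
        assert(rkey >> 8 == 0x5);
        return 0;
    }
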
167 struct rxe_mw *mw;
170 u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
171 u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
172 int access = wqe->wr.wr.mw.access;
174 mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
175 if (unlikely(!mw)) {
180 if (unlikely(mw->rkey != mw_rkey)) {
185 if (likely(wqe->wr.wr.mw.length)) {
201 rxe_err_mw(mw, "access %#x not supported", access);
206 spin_lock_bh(&mw->lock);
208 ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
212 rxe_do_bind_mw(qp, wqe, mw, mr, access);
214 spin_unlock_bh(&mw->lock);
219 rxe_put(mw);
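
rxe_bind_mw() itself (source lines 167-219) is the glue: it recovers the window from the pool with mw_rkey >> 8, rejects a stale handle whose full rkey no longer matches (line 180), then runs the check/do pair under spin_lock_bh (lines 206-214) so validation and mutation form one atomic step, and finally drops the pool reference taken by the lookup (line 219). A userspace sketch of that check-then-commit-under-one-lock shape, with a pthread mutex standing in for the kernel spinlock; all names here are illustrative:

    #include <errno.h>
    #include <pthread.h>

    struct window {
        pthread_mutex_t lock;
        int state;                      /* stand-in for mw->state */
    };

    /* Validate and commit inside a single critical section, mirroring
     * the check/do pair under mw->lock at source lines 206-214.
     */
    static int bind_window(struct window *w, int required, int next)
    {
        int ret = 0;

        pthread_mutex_lock(&w->lock);
        if (w->state != required)       /* the rxe_check_bind_mw() step */
            ret = -EINVAL;
        else
            w->state = next;            /* the rxe_do_bind_mw() step */
        pthread_mutex_unlock(&w->lock);

        return ret;
    }

    int main(void)
    {
        struct window w = { PTHREAD_MUTEX_INITIALIZER, 0 };

        return bind_window(&w, 0, 1);   /* 0 -> 1 succeeds, returns 0 */
    }
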
224 static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
226 if (unlikely(mw->state == RXE_MW_STATE_INVALID))
230 if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
236 static void rxe_do_invalidate_mw(struct rxe_mw *mw)
242 qp = mw->qp;
243 mw->qp = NULL;
247 mr = mw->mr;
248 mw->mr = NULL;
252 mw->access = 0;
253 mw->addr = 0;
254 mw->length = 0;
255 mw->state = RXE_MW_STATE_FREE;
261 struct rxe_mw *mw;
264 mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
265 if (!mw) {
270 if (rkey != mw->rkey) {
275 spin_lock_bh(&mw->lock);
277 ret = rxe_check_invalidate_mw(qp, mw);
281 rxe_do_invalidate_mw(mw);
283 spin_unlock_bh(&mw->lock);
285 rxe_put(mw);
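
Invalidate (source lines 224-285) is the reverse transition: the check refuses an already-invalid window (line 226) and a type 1 window (line 230), then the do step detaches the QP and MR and parks a type 2 window back in the free state (line 255), again under the same lock-and-put discipline as bind. A sketch of the three-state life cycle the code moves through; the enum mirrors the RXE_MW_STATE_* names, but the transition helper is made up for the sketch:

    #include <assert.h>
    #include <stdbool.h>

    enum mw_state { MW_INVALID, MW_FREE, MW_VALID };

    /* A type 2 window cycles alloc -> FREE, bind -> VALID,
     * invalidate -> VALID back to FREE; INVALID is only reached via
     * cleanup. Compare source lines 226 and 255.
     */
    static bool invalidate(enum mw_state *s)
    {
        if (*s == MW_INVALID)
            return false;               /* already dead, reject */
        *s = MW_FREE;                   /* bindable again */
        return true;
    }

    int main(void)
    {
        enum mw_state s = MW_VALID;

        assert(invalidate(&s) && s == MW_FREE);
        return 0;
    }
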
294 struct rxe_mw *mw;
297 mw = rxe_pool_get_index(&rxe->mw_pool, index);
298 if (!mw)
301 if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
302 (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
303 (mw->length == 0) || ((access & mw->access) != access) ||
304 mw->state != RXE_MW_STATE_VALID)) {
305 rxe_put(mw);
309 return mw;
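
rxe_lookup_mw() (source lines 294-309) is the gate used when a remote operation arrives carrying an MW rkey: after the index lookup, the single predicate at lines 301-304 requires a full rkey match, a matching PD, the binding QP for a type 2 window, a nonzero bound length, every requested access bit present in the window, and the valid state. A sketch of the access-mask test, which is easy to misread because it checks a subset relation rather than any overlap; the names are illustrative:

    #include <assert.h>
    #include <stdbool.h>

    /* Every requested access bit must be granted by the window:
     * (access & mw->access) == access at source line 303 is a subset
     * test, not an intersection test.
     */
    static bool access_ok(int requested, int granted)
    {
        return (requested & granted) == requested;
    }

    int main(void)
    {
        assert(access_ok(0x1, 0x3));    /* read asked, read|write granted */
        assert(!access_ok(0x3, 0x1));   /* write asked but not granted */
        return 0;
    }
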
314 struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
315 struct rxe_pd *pd = to_rpd(mw->ibmw.pd);
319 if (mw->mr) {
320 struct rxe_mr *mr = mw->mr;
322 mw->mr = NULL;
327 if (mw->qp) {
328 struct rxe_qp *qp = mw->qp;
330 mw->qp = NULL;
334 mw->access = 0;
335 mw->addr = 0;
336 mw->length = 0;
337 mw->state = RXE_MW_STATE_INVALID;
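
Finally, rxe_mw_cleanup() (source lines 314-337) runs when the pool element is destroyed: it detaches and releases any bound MR and QP, drops the PD reference taken at allocation, and leaves the window in the invalid state so a racing lookup fails. A sketch of the detach-then-release pattern used for the MR slot, with stdatomic counters standing in for the kernel's reference counting; struct fake_mr and detach_mr are made up for the sketch:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct fake_mr {
        atomic_int refcnt;              /* rxe_put() analogue */
        atomic_int num_mw;              /* windows bound to this MR */
    };

    /* Detach first, then release: clearing the slot before dropping
     * the counts means nothing can reach the MR through the window
     * mid-teardown (compare source lines 319-322).
     */
    static void detach_mr(struct fake_mr **slot)
    {
        struct fake_mr *mr = *slot;

        if (!mr)
            return;
        *slot = NULL;
        atomic_fetch_sub(&mr->num_mw, 1);
        atomic_fetch_sub(&mr->refcnt, 1);
    }

    int main(void)
    {
        struct fake_mr m = { 1, 1 };
        struct fake_mr *slot = &m;

        detach_mr(&slot);
        assert(slot == NULL && atomic_load(&m.num_mw) == 0);
        return 0;
    }
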