/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;
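/*
 * Note on the valid-bit (polarity) scheme used throughout this file:
 * instead of zeroing consumed descriptors, producer and consumer each
 * track a polarity that flips every time their ring wraps. A WQE/CQE
 * belongs to the current pass only if its valid bit matches the
 * expected polarity. A minimal sketch of the consumer side, using the
 * names from this file (illustrative only, mirroring
 * i40iw_cq_poll_completion below):
 *
 *	get_64bit_val(cqe, 24, &qword3);
 *	if ((u8)RS_64(qword3, I40IW_CQ_VALID) != cq->polarity)
 *		return I40IW_ERR_QUEUE_EMPTY;	// not yet written by HW
 *	// ...consume entry...
 *	if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
 *		cq->polarity ^= 1;		// wrapped: expect flipped bit
 */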
/**
 * i40iw_nop_1 - insert a nop wqe at the current head; no doorbell post
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
	u64 header, *wqe;
	u64 *wqe_0 = NULL;
	u32 wqe_idx, peek_head;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return I40IW_ERR_PARAM;

	wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

	peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	else
		wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

	wmb();	/* Memory barrier to ensure data is written before valid bit is set */

	set_64bit_val(wqe, 24, header);
	return 0;
}

/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	mb();	/* valid bit is written and loads completed before reading shadow */

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
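/*
 * Worked example for the doorbell decision above (illustrative
 * numbers): initial_ring.head = 3 (head at the last doorbell) and
 * sq_ring.head = 7 (new WQEs at indices 3..6). If the hardware tail
 * read from the shadow area lies in [3, 7), hardware went idle inside
 * the newly posted batch and the doorbell must be rung; otherwise
 * hardware is still fetching and will see the new WQEs on its own.
 * The else-branch handles the same window when sw_sq_head has wrapped
 * past the end of the ring, which splits the window in two:
 *
 *	ring_db = (hw_sq_tail >= qp->initial_ring.head) ||
 *		  (hw_sq_tail < sw_sq_head);
 */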
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
	qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}

/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: size of the wr payload in bytes
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
				u32 *wqe_idx,
				u8 wqe_size,
				u32 total_size,
				u64 wr_id)
{
	u64 *wqe = NULL;
	u64 wqe_ptr;
	u32 peek_head = 0;
	u16 offset;
	enum i40iw_status_code ret_code = 0;
	u8 nop_wqe_cnt = 0, i;
	u64 *wqe_0 = NULL;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
	offset = (u16)(wqe_ptr) & 0x7F;
	if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
		nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
		for (i = 0; i < nop_wqe_cnt; i++) {
			i40iw_nop_1(qp);
			I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
			if (ret_code)
				return NULL;
		}

		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}

	if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
		i40iw_nop_1(qp);
		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
		if (ret_code)
			return NULL;
		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}
	I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
				      wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
	if (ret_code)
		return NULL;

	wqe = qp->sq_base[*wqe_idx].elem;

	peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe_0 = qp->sq_base[peek_head].elem;

	if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
		if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
			wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	}

	qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
	return wqe;
}
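/*
 * Padding example for i40iw_qp_get_next_send_wqe (illustrative): WQEs
 * are allocated in 32-byte quanta and may not straddle a 128-byte
 * line. If the next free quantum sits at offset 0x60 within its line
 * and a 64-byte WQE is requested, then offset + wqe_size = 0x60 + 0x40
 * exceeds I40IW_QP_WQE_MAX_SIZE, so (0x80 - 0x60) / 32 = 1 NOP quantum
 * is inserted first and the WQE starts at the next line. A 64-byte WQE
 * whose index satisfies (idx & 3) == 1 is likewise pushed forward by
 * one NOP.
 */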
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
	if (sge) {
		set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
		set_64bit_val(wqe, (offset + 8),
			      (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
			       LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
	}
}

/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

	return wqe;
}

/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
					       struct i40iw_post_sq_info *info,
					       bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
		return I40IW_ERR_QP_INVALID_MSG_SIZE;

	read_fence |= info->read_fence;

	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	if (!op_info->rem_addr.stag)
		return I40IW_ERR_BAD_STAG;

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
		i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
		byte_off += 16;
	}

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
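/*
 * SQ WQE layout produced by i40iw_rdma_write (byte offsets into the
 * WQE, read off the set_64bit_val calls above):
 *
 *	0	first fragment: tagged offset (FRAG_TO)
 *	8	first fragment: length and local stag
 *	16	remote tagged offset (rem_addr.tag_off)
 *	24	header (opcode, remote stag, fences, valid bit) -
 *		written last, after the wmb(), so hardware never sees
 *		a half-built WQE
 *	32+	additional fragments, 16 bytes each
 */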
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: true if the read should invalidate the local stag on completion
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
					      struct i40iw_post_sq_info *info,
					      bool inv_stag,
					      bool post_sq)
{
	u64 *wqe;
	struct i40iw_rdma_read *op_info;
	u64 header;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	u8 wqe_size;
	bool local_fence = false;

	op_info = &info->op.rdma_read;
	ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
	if (ret_code)
		return ret_code;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	local_fence |= info->local_fence;

	set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag to invalidate (for send with invalidate)
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
					 struct i40iw_post_sq_info *info,
					 u32 stag_to_inv,
					 bool post_sq)
{
	u64 *wqe;
	struct i40iw_post_send *op_info;
	u64 header;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;
	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16, 0);
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_sges > 1 ?
			(op_info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->sg_list);

	for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
		i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
		byte_off += 16;
	}

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
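/*
 * Hedged usage sketch for the send path. Field and ops names below are
 * taken from this file; I40IW_OP_TYPE_SEND is assumed to be the send
 * opcode from i40iw_user.h, and error handling is trimmed:
 *
 *	struct i40iw_post_sq_info info = {};
 *
 *	info.wr_id = 42;
 *	info.op_type = I40IW_OP_TYPE_SEND;	// assumed opcode name
 *	info.signaled = true;
 *	info.op.send.num_sges = 1;
 *	info.op.send.sg_list = &sge;		// caller's stag/len/tag_off
 *	ret = qp->ops.iw_send(qp, &info, 0, true); // 0: nothing to invalidate
 */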
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
						      struct i40iw_post_sq_info *info,
						      bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_inline_rdma_write *op_info;
	u64 *push;
	u64 header = 0;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.inline_rdma_write;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		memcpy(dest, src, op_info->len);
	} else {
		memcpy(dest, src, 16);
		src += 16;
		dest = (u8 *)wqe + 32;
		memcpy(dest, src, op_info->len - 16);
	}

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}
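/*
 * Inline data layout used above (and by i40iw_inline_send below): the
 * first 16 data bytes occupy qwords 0-1 of the WQE, byte 24 holds the
 * header, and any remainder starts at byte 32. A payload longer than
 * 16 bytes therefore spans op_info->len + 16 bytes of WQE, which is
 * exactly the push-mode memcpy length:
 *
 *	memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
 *
 * Push mode (qp->push_db set) copies the finished WQE into a mapped
 * device buffer instead of letting hardware DMA it from host memory,
 * then writes the descriptor index to the push doorbell.
 */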
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
						struct i40iw_post_sq_info *info,
						u32 stag_to_inv,
						bool post_sq)
{
	u64 *wqe;
	u8 *dest, *src;
	struct i40iw_post_inline_send *op_info;
	u64 header;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;
	u64 *push;

	op_info = &info->op.inline_send;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		memcpy(dest, src, op_info->len);
	} else {
		memcpy(dest, src, 16);
		src += 16;
		dest = (u8 *)wqe + 32;
		memcpy(dest, src, op_info->len - 16);
	}

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}

/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
							  struct i40iw_post_sq_info *info,
							  bool post_sq)
{
	u64 *wqe;
	struct i40iw_inv_local_stag *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
	set_64bit_val(wqe, 16, 0);
	header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
					    struct i40iw_post_sq_info *info,
					    bool post_sq)
{
	u64 *wqe;
	struct i40iw_bind_window *op_info;
	u64 header;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.bind_window;

	local_fence |= info->local_fence;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
		      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
	set_64bit_val(wqe, 16, op_info->bind_length);
	header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
		 LS_64(((op_info->enable_reads << 2) |
			(op_info->enable_writes << 3)),
		       I40IWQPSQ_STAGRIGHTS) |
		 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
		       I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
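/*
 * Note on the bind header above, read purely from the code rather than
 * the hardware spec: STAGRIGHTS is built from
 * (enable_reads << 2) | (enable_writes << 3), so the two enable flags
 * (expected to be 0 or 1) land in bits 2 and 3 of the rights field,
 * while VABASEDTO selects a VA-based (1) versus zero-based (0) tagged
 * offset for the window.
 */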
/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
						 struct i40iw_post_rq_info *info)
{
	u64 *wqe;
	u64 header;
	u32 total_size = 0, wqe_idx, i, byte_off;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	for (i = 0; i < info->num_sges; i++)
		total_size += info->sg_list[i].len;
	wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	set_64bit_val(wqe, 16, 0);

	header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, info->sg_list);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
		byte_off += 16;
	}

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	return 0;
}

/**
 * i40iw_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
					  enum i40iw_completion_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
	arm_next_se |= 1;
	if (cq_notify == IW_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	wmb();	/* make sure shadow area is written before ringing the CQ doorbell */

	writel(cq->cq_id, cq->cqe_alloc_reg);
}

/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
						    u8 count)
{
	I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
	set_64bit_val(cq->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	return 0;
}
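/*
 * CQ arming sketch (illustrative; values mirror
 * i40iw_cq_request_notification above): the shadow area at offset 32
 * carries an arm sequence number that must change on every request,
 * plus two one-shot enables - ARM_NEXT_SE, armed here unconditionally
 * and firing on the next solicited event, and ARM_NEXT, set only for
 * IW_CQ_COMPL_EVENT and firing on any next completion. Requesting
 * "any completion" therefore looks like:
 *
 *	cq->ops.iw_cq_request_notification(cq, IW_CQ_COMPL_EVENT);
 */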
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
						       struct i40iw_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
	u64 *cqe, *sw_wqe;
	struct i40iw_qp_uk *qp;
	struct i40iw_ring *pring = NULL;
	u32 wqe_idx, q_type, array_idx = 0;
	enum i40iw_status_code ret_code = 0;
	bool move_cq_head = true;
	u8 polarity;
	u8 addl_wqes = 0;

	if (cq->avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

	if (polarity != cq->polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
	if (info->error) {
		info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
		info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
		info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
	} else {
		info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);

	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
	info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

	qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	if (!qp) {
		ret_code = I40IW_ERR_QUEUE_DESTROYED;
		goto exit;
	}
	wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
	info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

	if (q_type == I40IW_CQE_QTYPE_RQ) {
		array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
		if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->op_type = I40IW_OP_TYPE_REC;
		if (qword3 & I40IWCQ_STAG_MASK) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
		} else {
			info->stag_invalid_set = false;
		}
		info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
		I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		pring = &qp->rq_ring;
	} else {
		if (qp->first_sq_wq) {
			qp->first_sq_wq = false;
			if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
				I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				I40IW_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
				memset(info, 0, sizeof(struct i40iw_cq_poll_info));
				return i40iw_cq_poll_completion(cq, info);
			}
		}

		if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

			info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);

			addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
			I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
		} else {
			do {
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24, &wqe_qword);
				op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
				info->op_type = op_type;
				addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
				I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
				if (op_type != I40IWQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code &&
	    (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
		if (pring && (I40IW_RING_MORE_WORK(*pring)))
			move_cq_head = false;

	if (move_cq_head) {
		I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);

		if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
			cq->polarity ^= 1;

		I40IW_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	} else {
		if (info->is_srq)
			return ret_code;
		qword3 &= ~I40IW_CQ_WQEIDX_MASK;
		qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}
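/*
 * Flush handling note: when a CQE reports I40IW_COMPL_STATUS_FLUSHED
 * for the SQ, the reported wqe_idx is not trusted; the SQ is instead
 * consumed from its own tail, skipping the NOP padding WQEs inserted
 * by i40iw_qp_get_next_send_wqe, until a real WR is found whose wrid
 * can be returned. The CQ head is only advanced once the flushed ring
 * has no more work (I40IW_RING_MORE_WORK), so a single flush CQE can
 * drain several SQ entries across repeated poll calls.
 */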
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @sge: maximum number of scatter gather elements per wqe
 * @inline_data: maximum inline data size
 * @shift: returns the shift needed based on sge and inline data
 *
 * Shift can be used to left shift the wqe size based on the number of
 * SGEs and the inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (sge > 1 || inline_data > 16)
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
}

/**
 * i40iw_get_sqdepth - get SQ depth (quanta)
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);

	if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
		return I40IW_ERR_INVALID_SIZE;

	return 0;
}

/**
 * i40iw_get_rqdepth - get RQ depth (quanta)
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);

	if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
		return I40IW_ERR_INVALID_SIZE;

	return 0;
}

static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
	.iw_qp_post_wr = i40iw_qp_post_wr,
	.iw_qp_ring_push_db = i40iw_qp_ring_push_db,
	.iw_rdma_write = i40iw_rdma_write,
	.iw_rdma_read = i40iw_rdma_read,
	.iw_send = i40iw_send,
	.iw_inline_rdma_write = i40iw_inline_rdma_write,
	.iw_inline_send = i40iw_inline_send,
	.iw_stag_local_invalidate = i40iw_stag_local_invalidate,
	.iw_mw_bind = i40iw_mw_bind,
	.iw_post_receive = i40iw_post_receive,
	.iw_post_nop = i40iw_nop
};

static const struct i40iw_cq_ops iw_cq_ops = {
	.iw_cq_request_notification = i40iw_cq_request_notification,
	.iw_cq_poll_completion = i40iw_cq_poll_completion,
	.iw_cq_post_entries = i40iw_cq_post_entries,
	.iw_cq_clean = i40iw_clean_cq
};

static const struct i40iw_device_uk_ops iw_device_uk_ops = {
	.iwarp_cq_uk_init = i40iw_cq_uk_init,
	.iwarp_qp_uk_init = i40iw_qp_uk_init,
};
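/*
 * Worked sizing example (illustrative numbers): max_sq_frag_cnt = 3
 * and max_inline_data = 48 give sqshift = 1 via i40iw_get_wqe_shift
 * (64-byte WQEs, i.e. two 32-byte quanta each), so a requested sq_size
 * of 100 WRs needs (100 << 1) = 200 quanta plus I40IW_SQ_RSVD, rounded
 * up to the next power of two by i40iw_get_sqdepth.
 */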
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of the wqe * the number of wqes should then be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes).
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
					struct i40iw_qp_uk_init_info *info)
{
	enum i40iw_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);

	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;
	qp->rq_wrid_array = info->rq_wrid_array;

	qp->wqe_alloc_reg = info->wqe_alloc_reg;
	qp->qp_id = info->qp_id;

	qp->sq_size = info->sq_size;
	qp->push_db = info->push_db;
	qp->push_wqe = info->push_wqe;

	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;

	I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
	I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	I40IW_RING_MOVE_TAIL(qp->sq_ring);
	I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
	qp->swqe_polarity = 1;
	qp->first_sq_wq = true;
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;

	if (!qp->use_srq) {
		qp->rq_size = info->rq_size;
		qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
		I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
		switch (info->abi_ver) {
		case 4:
			i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
			break;
		case 5: /* fallthrough until next ABI version */
		default:
			rqshift = I40IW_MAX_RQ_WQE_SHIFT;
			break;
		}
		qp->rq_wqe_size = rqshift;
		qp->rq_wqe_size_multiplier = 4 << rqshift;
	}
	qp->ops = iw_qp_uk_ops;

	return ret_code;
}

/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
					struct i40iw_cq_uk_init_info *info)
{
	if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
	    (info->cq_size > I40IW_MAX_CQ_SIZE))
		return I40IW_ERR_INVALID_SIZE;
	cq->cq_base = (struct i40iw_cqe *)info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_reg = info->cqe_alloc_reg;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;

	I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
	cq->ops = iw_cq_ops;

	return 0;
}

/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared device (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
	dev->ops_uk = iw_device_uk_ops;
}
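/*
 * Hedged bring-up sketch for i40iw_qp_uk_init. Field names come from
 * this file; the queue memory, doorbell register, and sizes are
 * placeholders the caller must provide, and the tracking arrays,
 * qp_id, push buffers, and abi_ver are omitted for brevity:
 *
 *	struct i40iw_qp_uk_init_info qinfo = {};
 *
 *	qinfo.sq = sq_mem;		// quanta array for the SQ
 *	qinfo.rq = rq_mem;
 *	qinfo.shadow_area = shadow_mem;
 *	qinfo.wqe_alloc_reg = db_reg;	// mapped doorbell register
 *	qinfo.sq_size = sq_depth;	// from i40iw_get_sqdepth
 *	qinfo.rq_size = rq_depth;	// from i40iw_get_rqdepth
 *	qinfo.max_sq_frag_cnt = 3;
 *	qinfo.max_rq_frag_cnt = 3;
 *	ret = i40iw_qp_uk_init(qp, &qinfo);
 */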
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
	u64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
		else
			cqe = (u64 *)&cq->cq_base[cq_head];
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == queue)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
				 u64 wr_id,
				 bool signaled,
				 bool post_sq)
{
	u64 header, *wqe;
	u32 wqe_idx;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
		*wqe_size = 96;
		break;
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
							 u8 *wqe_size)
{
	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

	if (data_size <= 16)
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
	else
		*wqe_size = 64;

	return 0;
}