Lines Matching defs:cqp
48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe
69 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70 * @cqp: struct for cqp hw
71 * @val: cqp tail register value
73 * @error: cqp processing err
75 static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
80 if (cqp->dev->is_pf) {
81 *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
85 *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
92 * i40iw_cqp_poll_registers - poll cqp registers
93 * @cqp: struct for cqp hw
98 struct i40iw_sc_cqp *cqp,
107 i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
109 error = (cqp->dev->is_pf) ?
110 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
111 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
116 I40IW_RING_MOVE_TAIL(cqp->sq_ring);
117 cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
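The two helpers above form the register-polling completion path used throughout this file: i40iw_get_cqp_reg_info() snapshots the CQPTAIL register (tail index plus error bit) and i40iw_cqp_poll_registers() spins until the SQ ring tail advances past that snapshot, bumping OP_COMPLETED_COMMANDS. A minimal sketch of the calling pattern; the error status constant is assumed since it is not among the matched lines:

	u32 val, tail, error;
	enum i40iw_status_code ret_code;

	/* snapshot tail and error state before ringing the doorbell */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;	/* status value assumed */

	/* ... build the command WQE ... */

	i40iw_sc_cqp_post_sq(cqp);
	/* spin until hardware consumes the WQE or the retry budget runs out */
	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);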
366 /* issue cqp suspend command */
431 * @cqpsq: set for the cqp sq, whose encoded size is 1 greater than other wq's
437 /* cqp sq's hw coded value starts from 1 for size of 4
450 * @cqp: IWARP control queue pair pointer
455 static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
466 cqp->size = sizeof(*cqp);
467 cqp->sq_size = info->sq_size;
468 cqp->hw_sq_size = hw_sq_size;
469 cqp->sq_base = info->sq;
470 cqp->host_ctx = info->host_ctx;
471 cqp->sq_pa = info->sq_pa;
472 cqp->host_ctx_pa = info->host_ctx_pa;
473 cqp->dev = info->dev;
474 cqp->struct_ver = info->struct_ver;
475 cqp->scratch_array = info->scratch_array;
476 cqp->polarity = 0;
477 cqp->en_datacenter_tcp = info->en_datacenter_tcp;
478 cqp->enabled_vf_count = info->enabled_vf_count;
479 cqp->hmc_profile = info->hmc_profile;
480 info->dev->cqp = cqp;
482 I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
483 cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
484 cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
485 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
487 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
488 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
490 i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
491 "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
492 __func__, cqp->sq_size, cqp->hw_sq_size,
493 cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
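i40iw_sc_cqp_init() copies the caller's parameters into the i40iw_sc_cqp structure, initializes the SQ ring and the command backlog list, and zeroes the PF tail/doorbell registers. A sketch of how a caller might fill the init structure; the struct type name, the DMA buffers (sq_mem, ctx_mem), and the local parameters are assumptions not shown in this listing:

	struct i40iw_cqp_init_info info = {};	/* type name assumed from the field copies above */
	enum i40iw_status_code ret_code;

	info.dev = dev;
	info.sq_size = sq_size;			/* number of CQP SQ WQEs */
	info.sq = sq_mem.va;			/* DMA-coherent SQ ring, allocation not shown */
	info.sq_pa = sq_mem.pa;
	info.host_ctx = ctx_mem.va;		/* host context block written later by cqp_create */
	info.host_ctx_pa = ctx_mem.pa;
	info.scratch_array = scratch_array;	/* one u64 cookie slot per SQ WQE index */
	info.hmc_profile = hmc_profile;

	ret_code = i40iw_sc_cqp_init(cqp, &info);	/* in-tree callers reach this through an ops table */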
498 * i40iw_sc_cqp_create - create cqp during bringup
499 * @cqp: struct for cqp hw
503 static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
514 ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
515 &cqp->sdbuf,
516 I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
522 temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
523 LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
525 set_64bit_val(cqp->host_ctx, 0, temp);
526 set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
527 temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
528 LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
529 set_64bit_val(cqp->host_ctx, 16, temp);
530 set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
531 set_64bit_val(cqp->host_ctx, 32, 0);
532 set_64bit_val(cqp->host_ctx, 40, 0);
533 set_64bit_val(cqp->host_ctx, 48, 0);
534 set_64bit_val(cqp->host_ctx, 56, 0);
536 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
537 cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
539 p1 = RS_32_1(cqp->host_ctx_pa, 32);
540 p2 = (u32)cqp->host_ctx_pa;
542 if (cqp->dev->is_pf) {
543 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
544 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
546 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
547 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
551 i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
557 if (cqp->dev->is_pf)
558 err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
560 err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
566 if (cqp->dev->is_pf)
567 val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
569 val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
574 cqp->process_cqp_sds = i40iw_update_sds_noccq;
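The fragments above (freeing sdbuf, reading CQPERRCODES, reading CCQPSTATUS) belong to the bring-up handshake of i40iw_sc_cqp_create(): after the host context address is programmed through CCQPHIGH/CCQPLOW, the function polls CCQPSTATUS until hardware acknowledges the context, decodes the error codes on timeout, and finally defaults process_cqp_sds to the no-CCQ path (the CCQ does not exist yet). A sketch of the poll loop those lines sit inside, with the retry bound, delay constant, and timeout status named as assumptions:

	u32 cnt = 0, val;

	do {
		if (cnt++ > I40IW_DONE_COUNT) {		/* retry bound assumed */
			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
			ret_code = I40IW_ERR_TIMEOUT;	/* status value assumed */
			/* read CQPERRCODES / CQPERRCODES1 here to report major/minor codes */
			goto exit;
		}
		udelay(I40IW_SLEEP_COUNT);		/* delay constant assumed */
		if (cqp->dev->is_pf)
			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
		else
			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
	} while (!val);					/* wait for a non-zero (done) status */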
579 * i40iw_sc_cqp_post_sq - post to cqp's sq
580 * @cqp: struct for cqp hw
582 void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
584 if (cqp->dev->is_pf)
585 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
587 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
589 i40iw_debug(cqp->dev,
593 cqp->sq_ring.head,
594 cqp->sq_ring.tail,
595 cqp->sq_ring.size);
600 * @cqp: pointer to CQP structure
604 static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
610 if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
611 i40iw_debug(cqp->dev,
615 cqp->sq_ring.head,
616 cqp->sq_ring.tail,
617 cqp->sq_ring.size);
620 I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
621 cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
625 cqp->polarity = !cqp->polarity;
627 wqe = cqp->sq_base[*wqe_idx].elem;
628 cqp->scratch_array[*wqe_idx] = scratch;
635 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
636 * @cqp: struct for cqp hw
639 u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
643 return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
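Every command builder later in the file follows the same sequence: reserve the next SQ WQE (which also records the caller's scratch cookie), write the command-specific quadwords with set_64bit_val(), write the header last so the valid bit carries the ring's current polarity, dump the WQE when WQE debugging is on, and optionally ring the doorbell. A minimal sketch of that sequence; the opcode field macro, header-insert helper, WQE size constant, and ring-full status are assumptions consistent with the fields that do appear in this listing:

	u64 *wqe, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;			/* status value assumed */

	set_64bit_val(wqe, 16, payload);			/* command-specific fields first */

	header = LS_64(opcode, I40IW_CQPSQ_OPCODE) |		/* opcode field macro assumed */
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);			/* helper assumed: barrier, then header write */

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "EXAMPLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);		/* size constant assumed */
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);			/* write the new ring head to CQPDB */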
647 * i40iw_sc_cqp_destroy - destroy cqp during close
648 * @cqp: struct for cqp hw
650 static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
656 if (cqp->dev->is_pf) {
657 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
658 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
661 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
662 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
671 val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
674 i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
725 struct i40iw_sc_cqp *cqp;
741 cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
749 info->scratch = cqp->scratch_array[wqe_idx];
753 get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
755 info->cqp = cqp;
768 I40IW_RING_MOVE_TAIL(cqp->sq_ring);
776 * @cqp: struct for cqp hw
777 * @op_code: cqp opcode for completion
781 struct i40iw_sc_cqp *cqp,
791 ccq = cqp->dev->ccq;
807 i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
824 * @cqp: struct for cqp hw
826 * @scratch: u64 saved to be used during cqp completion
827 * @post_sq: flag for cqp db to ring
830 struct i40iw_sc_cqp *cqp,
841 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
849 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
854 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
858 i40iw_sc_cqp_post_sq(cqp);
864 * @cqp: struct for cqp hw
865 * @scratch: u64 saved to be used during cqp completion
866 * @vf_index: vf index for cqp
868 * @post_sq: flag for cqp db to ring
871 struct i40iw_sc_cqp *cqp,
882 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
889 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
892 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
895 i40iw_sc_cqp_post_sq(cqp);
900 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
901 * @cqp: struct for cqp hw
902 * @scratch: u64 saved to be used during cqp completion
905 * @post_sq: flag for cqp db to ring
906 * @poll_registers: flag to poll register for cqp completion
909 struct i40iw_sc_cqp *cqp,
920 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
929 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
933 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
936 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
941 i40iw_sc_cqp_post_sq(cqp);
943 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
945 ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
954 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
955 * @cqp: struct for cqp hw
957 static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
959 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
963 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
964 * @cqp: struct for cqp hw
966 static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
968 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
972 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
973 * @cqp: struct for cqp hw
974 * @scratch: u64 saved to be used during cqp completion
977 * @post_sq: flag for cqp db to ring
978 * @wait_type: poll ccq or cqp registers for cqp completion
981 struct i40iw_sc_cqp *cqp,
993 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1001 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1005 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
1008 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1013 i40iw_sc_cqp_post_sq(cqp);
1016 ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
1018 ret_code = i40iw_sc_commit_fpm_values_done(cqp);
1025 * i40iw_sc_query_rdma_features_done - poll cqp for query features done
1026 * @cqp: struct for cqp hw
1029 i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp)
1032 cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL);
1037 * @cqp: struct for cqp hw
1039 * @scratch: u64 saved to be used during cqp completion
1042 i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
1048 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1055 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size;
1059 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE",
1062 i40iw_sc_cqp_post_sq(cqp);
1085 ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
1087 ret_code = i40iw_sc_query_rdma_features_done(dev->cqp);
1114 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1115 * @cqp: struct for cqp hw
1117 static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
1119 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
1123 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
1124 * @cqp: struct for cqp hw
1125 * @scratch: u64 saved to be used during cqp completion
1128 * @post_sq: flag for cqp db to ring
1129 * @wait_type: poll ccq or cqp registers for cqp completion
1132 struct i40iw_sc_cqp *cqp,
1144 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1152 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1156 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
1160 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1166 i40iw_sc_cqp_post_sq(cqp);
1168 ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
1170 ret_code = i40iw_sc_query_fpm_values_done(cqp);
1177 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
1178 * @cqp: struct for cqp hw
1180 * @scratch: u64 saved to be used during cqp completion
1181 * @post_sq: flag for cqp db to ring
1184 struct i40iw_sc_cqp *cqp,
1192 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1210 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1214 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
1218 i40iw_sc_cqp_post_sq(cqp);
1224 * @cqp: struct for cqp hw
1225 * @scratch: u64 saved to be used during cqp completion
1227 * @post_sq: flag for cqp db to ring
1230 struct i40iw_sc_cqp *cqp,
1238 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1244 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1247 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
1251 i40iw_sc_cqp_post_sq(cqp);
1256 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
1257 * @cqp: struct for cqp hw
1258 * @scratch: u64 saved to be used during cqp completion
1260 * @post_sq: flag for cqp db to ring
1263 struct i40iw_sc_cqp *cqp,
1271 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1278 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1282 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
1286 i40iw_sc_cqp_post_sq(cqp);
1292 * @cqp: struct for cqp hw
1294 * @scratch: u64 saved to be used during cqp completion
1295 * @post_sq: flag for cqp db to ring
1298 struct i40iw_sc_cqp *cqp,
1306 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1314 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1318 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
1322 i40iw_sc_cqp_post_sq(cqp);
1328 * @cqp: struct for cqp hw
1330 * @scratch: u64 saved to be used during cqp completion
1331 * @post_sq: flag for cqp db to ring
1343 struct i40iw_sc_cqp *cqp,
1354 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1407 temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
1416 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
1420 i40iw_sc_cqp_post_sq(cqp);
1425 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1426 * @cqp: struct for cqp hw
1427 * @scratch: u64 saved to be used during cqp completion
1428 * @post_sq: flag for cqp db to ring
1431 struct i40iw_sc_cqp *cqp,
1438 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1442 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1445 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1448 i40iw_sc_cqp_post_sq(cqp);
1454 * @cqp: struct for cqp hw
1456 * @scratch: u64 saved to be used during cqp completion
1457 * @post_sq: flag for cqp db to ring
1460 struct i40iw_sc_cqp *cqp,
1468 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1482 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1486 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
1490 i40iw_sc_cqp_post_sq(cqp);
1495 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac
1496 * @cqp: struct for cqp hw
1497 * @scratch: u64 saved to be used during cqp completion
1500 * @post_sq: flag for cqp db to ring
1503 struct i40iw_sc_cqp *cqp,
1512 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1518 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
1523 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
1527 i40iw_sc_cqp_post_sq(cqp);
1533 * @cqp: struct for cqp hw
1534 * @scratch: u64 saved to be used during cqp completion
1535 * @post_sq: flag for cqp db to ring
1537 static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
1544 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1548 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1550 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
1554 i40iw_sc_cqp_post_sq(cqp);
1604 * @scratch: u64 saved to be used during cqp completion
1605 * @post_sq: flag for cqp db to ring
1611 struct i40iw_sc_cqp *cqp;
1615 cqp = ceq->dev->cqp;
1616 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1629 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1633 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
1637 i40iw_sc_cqp_post_sq(cqp);
1647 struct i40iw_sc_cqp *cqp;
1649 cqp = ceq->dev->cqp;
1650 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1659 struct i40iw_sc_cqp *cqp;
1661 cqp = ceq->dev->cqp;
1662 cqp->process_cqp_sds = i40iw_update_sds_noccq;
1663 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
1669 * @scratch: u64 saved to be used during cqp completion
1684 * @scratch: u64 saved to be used during cqp completion
1685 * @post_sq: flag for cqp db to ring
1691 struct i40iw_sc_cqp *cqp;
1695 cqp = ceq->dev->cqp;
1696 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1706 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1708 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
1712 i40iw_sc_cqp_post_sq(cqp);
1787 * @scratch: u64 saved to be used during cqp completion
1788 * @post_sq: flag for cqp db to ring
1795 struct i40iw_sc_cqp *cqp;
1798 cqp = aeq->dev->cqp;
1799 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1811 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1814 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
1817 i40iw_sc_cqp_post_sq(cqp);
1824 * @scratch: u64 saved to be used during cqp completion
1825 * @post_sq: flag for cqp db to ring
1832 struct i40iw_sc_cqp *cqp;
1835 cqp = aeq->dev->cqp;
1836 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1844 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1847 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
1850 i40iw_sc_cqp_post_sq(cqp);
1999 struct i40iw_sc_cqp *cqp;
2001 cqp = aeq->dev->cqp;
2002 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
2011 struct i40iw_sc_cqp *cqp;
2013 cqp = aeq->dev->cqp;
2014 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
2069 * i40iw_sc_ccq_create_done - poll cqp for ccq create
2074 struct i40iw_sc_cqp *cqp;
2076 cqp = ccq->dev->cqp;
2077 return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
2083 * @scratch: u64 saved to be used during cqp completion
2085 * @post_sq: flag for cqp db to ring
2093 struct i40iw_sc_cqp *cqp;
2097 cqp = ccq->dev->cqp;
2098 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2122 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2126 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
2130 i40iw_sc_cqp_post_sq(cqp);
2135 cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
2143 * @scratch: u64 saved to be used during cqp completion
2144 * @post_sq: flag for cqp db to ring
2150 struct i40iw_sc_cqp *cqp;
2156 cqp = ccq->dev->cqp;
2157 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2171 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2175 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
2178 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
2183 i40iw_sc_cqp_post_sq(cqp);
2184 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
2187 cqp->process_cqp_sds = i40iw_update_sds_noccq;
2241 * @scratch: u64 saved to be used during cqp completion
2243 * @post_sq: flag for cqp db to ring
2251 struct i40iw_sc_cqp *cqp;
2260 cqp = cq->dev->cqp;
2261 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2287 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2291 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
2295 i40iw_sc_cqp_post_sq(cqp);
2302 * @scratch: u64 saved to be used during cqp completion
2303 * @post_sq: flag for cqp db to ring
2309 struct i40iw_sc_cqp *cqp;
2313 cqp = cq->dev->cqp;
2314 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2331 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2335 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
2339 i40iw_sc_cqp_post_sq(cqp);
2355 struct i40iw_sc_cqp *cqp;
2372 cqp = cq->dev->cqp;
2373 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2426 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2430 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
2434 i40iw_sc_cqp_post_sq(cqp);
2523 * @scratch: u64 saved to be used during cqp completion
2524 * @post_sq: flag for cqp db to ring
2532 struct i40iw_sc_cqp *cqp;
2540 cqp = qp->pd->dev->cqp;
2541 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2558 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2561 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2565 i40iw_sc_cqp_post_sq(cqp);
2570 * i40iw_sc_qp_modify - modify qp cqp wqe
2573 * @scratch: u64 saved to be used during cqp completion
2574 * @post_sq: flag for cqp db to ring
2583 struct i40iw_sc_cqp *cqp;
2588 cqp = qp->pd->dev->cqp;
2589 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2623 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2627 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2631 i40iw_sc_cqp_post_sq(cqp);
2636 * i40iw_sc_qp_destroy - cqp destroy qp
2638 * @scratch: u64 saved to be used during cqp completion
2641 * @post_sq: flag for cqp db to ring
2651 struct i40iw_sc_cqp *cqp;
2655 cqp = qp->pd->dev->cqp;
2656 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2667 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2670 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2674 i40iw_sc_cqp_post_sq(cqp);
2682 * @scratch: u64 saved to be used during cqp completion
2683 * @post_sq: flag for cqp db to ring
2693 struct i40iw_sc_cqp *cqp;
2708 cqp = qp->pd->dev->cqp;
2709 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2735 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2739 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2743 i40iw_sc_cqp_post_sq(cqp);
2751 * @scratch: u64 saved to be used during cqp completion
2752 * @post_sq: flag for cqp db to ring
2762 struct i40iw_sc_cqp *cqp;
2765 cqp = qp->pd->dev->cqp;
2766 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2778 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2782 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "GEN_AE WQE",
2786 i40iw_sc_cqp_post_sq(cqp);
2794 * @scratch: u64 saved to be used during cqp completion
2795 * @post_sq: flag for cqp db to ring
2804 struct i40iw_sc_cqp *cqp;
2807 cqp = dev->cqp;
2808 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2818 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2826 i40iw_sc_cqp_post_sq(cqp);
3022 * @scratch: u64 saved to be used during cqp completion
3023 * @post_sq: flag for cqp db to ring
3032 struct i40iw_sc_cqp *cqp;
3040 cqp = dev->cqp;
3041 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3063 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3071 i40iw_sc_cqp_post_sq(cqp);
3079 * @scratch: u64 saved to be used during cqp completion
3080 * @post_sq: flag for cqp db to ring
3090 struct i40iw_sc_cqp *cqp;
3112 cqp = dev->cqp;
3113 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3149 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3157 i40iw_sc_cqp_post_sq(cqp);
3165 * @scratch: u64 saved to be used during cqp completion
3166 * @post_sq: flag for cqp db to ring
3175 struct i40iw_sc_cqp *cqp;
3186 cqp = dev->cqp;
3187 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3212 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3220 i40iw_sc_cqp_post_sq(cqp);
3228 * @scratch: u64 saved to be used during cqp completion
3229 * @post_sq: flag for cqp db to ring
3239 struct i40iw_sc_cqp *cqp;
3241 cqp = dev->cqp;
3242 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3254 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3262 i40iw_sc_cqp_post_sq(cqp);
3269 * @scratch: u64 saved to be used during cqp completion
3271 * @post_sq: flag for cqp db to ring
3280 struct i40iw_sc_cqp *cqp;
3282 cqp = dev->cqp;
3283 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3291 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3299 i40iw_sc_cqp_post_sq(cqp);
3306 * @scratch: u64 saved to be used during cqp completion
3309 * @post_sq: flag for cqp db to ring
3319 struct i40iw_sc_cqp *cqp;
3322 cqp = dev->cqp;
3323 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3332 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3340 i40iw_sc_cqp_post_sq(cqp);
3348 * @post_sq: flag for cqp db to ring
3554 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3620 dev->cqp,
3657 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3702 dev->cqp,
3722 * cqp_sds_wqe_fill - fill cqp wqe for sd
3723 * @cqp: struct for cqp hw
3725 * @scratch: u64 saved to be used during cqp completion
3727 static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3735 struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3739 wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
3748 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3790 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3796 * i40iw_update_pe_sds - cqp wqe for sd
3799 * @scratch: u64 saved to be used during cqp completion
3805 struct i40iw_sc_cqp *cqp = dev->cqp;
3808 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3810 i40iw_sc_cqp_post_sq(cqp);
3824 struct i40iw_sc_cqp *cqp = dev->cqp;
3827 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3830 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3834 i40iw_sc_cqp_post_sq(cqp);
3835 ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3842 * @cqp: struct for cqp hw
3844 * @scratch: u64 saved to be used during cqp completion
3846 enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3853 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3858 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3862 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3865 i40iw_sc_cqp_post_sq(cqp);
3871 * @cqp: struct for cqp hw
3873 * @scratch: u64 saved to be used during cqp completion
3875 enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3882 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3891 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3895 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3898 i40iw_sc_cqp_post_sq(cqp);
3903 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3904 * @cqp: struct for cqp hw
3905 * @scratch: u64 saved to be used during cqp completion
3907 * @post_sq: flag for cqp db to ring
3908 * @poll_registers: flag to poll register for cqp completion
3911 struct i40iw_sc_cqp *cqp,
3922 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3930 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3934 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3936 i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3942 i40iw_sc_cqp_post_sq(cqp);
3944 /* check for cqp sq tail update */
3945 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3947 ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3956 * i40iw_ring_full - check if cqp ring is full
3957 * @cqp: struct for cqp hw
3959 static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3961 return I40IW_RING_FULL_ERR(cqp->sq_ring);
4124 * i40iw_exec_cqp_cmd - execute cqp cmd when wqes are available
4126 * @pcmdinfo: cqp command info
4138 pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
4157 pcmdinfo->in.u.del_arp_cache_entry.cqp,
4164 pcmdinfo->in.u.manage_apbvt_entry.cqp,
4181 pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
4187 pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
4194 pcmdinfo->in.u.manage_qhash_table_entry.cqp,
4295 pcmdinfo->in.u.add_arp_cache_entry.cqp,
4302 pcmdinfo->in.u.manage_push_page.cqp,
4318 pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
4326 pcmdinfo->in.u.suspend_resume.cqp,
4332 pcmdinfo->in.u.suspend_resume.cqp,
4338 pcmdinfo->in.u.manage_vf_pble_bp.cqp,
4346 pcmdinfo->in.u.query_fpm_values.cqp,
4355 pcmdinfo->in.u.commit_fpm_values.cqp,
4366 pcmdinfo->in.u.query_rdma_features.cqp, &values_mem,
4378 * i40iw_process_cqp_cmd - process all cqp commands
4380 * @pcmdinfo: cqp command info
4389 if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4398 * i40iw_process_bh - called from tasklet for cqp list
4408 while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
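The closing fragments show the deferral policy: i40iw_process_cqp_cmd() runs a command immediately only when the backlog list is empty and the SQ ring has room; otherwise the request is parked on dev->cqp_cmd_head, and i40iw_process_bh() drains that list from the tasklet as ring space frees up. A sketch of the dispatch step, with the lock and the list-entry member named as assumptions:

	unsigned long flags;
	enum i40iw_status_code status = 0;

	spin_lock_irqsave(&dev->cqp_lock, flags);		/* lock name assumed */
	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);	/* room available: execute now */
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry,		/* list member assumed */
			      &dev->cqp_cmd_head);		/* replayed later by i40iw_process_bh() */
	spin_unlock_irqrestore(&dev->cqp_lock, flags);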