Lines Matching defs:cqp

62 				/* issue cqp suspend command */
176 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
177 * @cqp: struct for cqp hw
179 * @scratch: u64 saved to be used during cqp completion
180 * @post_sq: flag for cqp db to ring
182 static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
189 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
199 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
207 irdma_sc_cqp_post_sq(cqp);
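Nearly every command matched below repeats the same five-step shape: reserve a WQE, fill the command-specific fields, build a header whose WQEVALID bit carries the ring polarity, publish the header only after a write barrier, and optionally ring the doorbell. A minimal sketch of that shape using the irdma helpers visible in these matches (illustrative, not the literal kernel code):

    static int irdma_sc_generic_cqp_cmd(struct irdma_sc_cqp *cqp, u64 scratch,
                                        bool post_sq)
    {
        __le64 *wqe;
        u64 hdr;

        /* reserve the next SQ WQE; scratch comes back at completion time */
        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
            return -ENOMEM;

        /* ... command-specific set_64bit_val() calls go here ... */

        /* header last: WQEVALID must match the current ring polarity */
        hdr = FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb();    /* payload must be visible before the valid bit */
        set_64bit_val(wqe, 24, hdr);

        if (post_sq)
            irdma_sc_cqp_post_sq(cqp);
        return 0;
    }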
214 * @cqp: struct for cqp hw
215 * @scratch: u64 saved to be used during cqp completion
217 * @post_sq: flag for cqp db to ring
219 static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
225 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
231 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
240 irdma_sc_cqp_post_sq(cqp);
247 * @cqp: struct for cqp hw
249 * @scratch: u64 saved to be used during cqp completion
250 * @post_sq: flag for cqp db to ring
252 static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
259 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
267 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
275 irdma_sc_cqp_post_sq(cqp);
282 * @cqp: struct for cqp hw
284 * @scratch: u64 saved to be used during cqp completion
285 * @post_sq: flag for cqp db to ring
300 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
310 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
351 temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
365 irdma_sc_cqp_post_sq(cqp);
445 * @scratch: u64 saved to be used during cqp completion
446 * @post_sq: flag for cqp db to ring
451 struct irdma_sc_cqp *cqp;
455 cqp = qp->dev->cqp;
456 if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
457 qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
460 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
479 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
487 irdma_sc_cqp_post_sq(cqp);
493 * irdma_sc_qp_modify - modify qp cqp wqe
496 * @scratch: u64 saved to be used during cqp completion
497 * @post_sq: flag for cqp db to ring
503 struct irdma_sc_cqp *cqp;
508 cqp = qp->dev->cqp;
509 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
548 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
556 irdma_sc_cqp_post_sq(cqp);
562 * irdma_sc_qp_destroy - cqp destroy qp
564 * @scratch: u64 saved to be used during cqp completion
567 * @post_sq: flag for cqp db to ring
573 struct irdma_sc_cqp *cqp;
576 cqp = qp->dev->cqp;
577 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
589 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
597 irdma_sc_cqp_post_sq(cqp);
757 * @cqp: struct for cqp hw
758 * @scratch: u64 saved to be used during cqp completion
759 * @post_sq: flag for cqp db to ring
761 static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
767 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
773 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
784 irdma_sc_cqp_post_sq(cqp);
790 * @cqp: struct for cqp hw
792 * @scratch: u64 saved to be used during cqp completion
793 * @post_sq: flag for cqp db to ring
795 static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
802 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
811 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
821 irdma_sc_cqp_post_sq(cqp);
826 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
827 * @cqp: struct for cqp hw
828 * @scratch: u64 saved to be used during cqp completion
831 * @post_sq: flag for cqp db to ring
833 static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
840 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
847 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
859 irdma_sc_cqp_post_sq(cqp);
1052 * @scratch: u64 saved to be used during cqp completion
1053 * @post_sq: flag for cqp db to ring
1060 struct irdma_sc_cqp *cqp;
1074 cqp = dev->cqp;
1075 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1099 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1107 irdma_sc_cqp_post_sq(cqp);
1116 * @scratch: u64 saved to be used during cqp completion
1117 * @post_sq: flag for cqp db to ring
1125 struct irdma_sc_cqp *cqp;
1154 cqp = dev->cqp;
1155 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1190 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1198 irdma_sc_cqp_post_sq(cqp);
1207 * @scratch: u64 saved to be used during cqp completion
1208 * @post_sq: flag for cqp db to ring
1216 struct irdma_sc_cqp *cqp;
1218 cqp = dev->cqp;
1219 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1230 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1238 irdma_sc_cqp_post_sq(cqp);
1247 * @scratch: u64 saved to be used during cqp completion
1248 * @post_sq: flag for cqp db to ring
1255 struct irdma_sc_cqp *cqp;
1258 cqp = dev->cqp;
1259 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1272 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1280 irdma_sc_cqp_post_sq(cqp);
1289 * @post_sq: flag for cqp db to ring
2029 /* cqp sq's hw coded value starts from 1 for size of 4
2043 * @cqp: struct for cqp hw
2045 * @scratch: u64 saved to be used during cqp completion
2047 static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
2057 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2065 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2079 irdma_sc_cqp_post_sq(cqp);
2080 ibdev_dbg(to_ibdev(cqp->dev),
2082 cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
2089 * @cqp: struct for cqp hw
2092 * @scratch: u64 saved to be used during cqp completion
2094 static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
2101 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2107 temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2121 irdma_sc_cqp_post_sq(cqp);
2127 * @cqp: struct for cqp hw
2129 * @scratch: u64 saved to be used during cqp completion
2131 static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
2138 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2150 temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
2161 irdma_sc_cqp_post_sq(cqp);
2168 * @cqp: struct for cqp hw
2171 * @scratch: u64 saved to be used during cqp completion
2173 static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
2180 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2188 temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
2203 irdma_sc_cqp_post_sq(cqp);
2212 * @scratch: u64 saved to be used during cqp completion
2213 * @post_sq: flag for cqp db to ring
2221 struct irdma_sc_cqp *cqp;
2239 cqp = qp->pd->dev->cqp;
2240 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2269 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2277 irdma_sc_cqp_post_sq(cqp);
2286 * @scratch: u64 saved to be used during cqp completion
2287 * @post_sq: flag for cqp db to ring
2295 struct irdma_sc_cqp *cqp;
2298 cqp = qp->pd->dev->cqp;
2299 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2310 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2318 irdma_sc_cqp_post_sq(cqp);
2326 * @scratch: u64 saved to be used during cqp completion
2327 * @post_sq: flag for cqp db to ring
2334 struct irdma_sc_cqp *cqp;
2337 cqp = dev->cqp;
2338 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2349 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2357 irdma_sc_cqp_post_sq(cqp);
2364 * @cqp: struct for cqp hw
2366 * @scratch: u64 saved to be used during cqp completion
2367 * @post_sq: flag for cqp db to ring
2369 static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
2377 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2380 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2388 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
2397 irdma_sc_cqp_post_sq(cqp);
2404 * @cqp: struct for cqp hw
2406 * @scratch: u64 saved to be used during cqp completion
2408 static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2414 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2420 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2427 irdma_sc_cqp_post_sq(cqp);
2434 * @cqp: struct for cqp hw
2436 * @scratch: u64 saved to be used during cqp completion
2438 static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2444 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2453 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2460 irdma_sc_cqp_post_sq(cqp);
2512 * @scratch: u64 saved to be used during cqp completion
2514 * @post_sq: flag for cqp db to ring
2520 struct irdma_sc_cqp *cqp;
2525 cqp = cq->dev->cqp;
2526 if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
2539 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2570 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2578 irdma_sc_cqp_post_sq(cqp);
2586 * @scratch: u64 saved to be used during cqp completion
2587 * @post_sq: flag for cqp db to ring
2591 struct irdma_sc_cqp *cqp;
2596 cqp = cq->dev->cqp;
2597 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2621 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2629 irdma_sc_cqp_post_sq(cqp);
2652 * @scratch: u64 saved to be used during cqp completion
2659 struct irdma_sc_cqp *cqp;
2669 cqp = cq->dev->cqp;
2670 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2695 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2703 irdma_sc_cqp_post_sq(cqp);
2709 * irdma_check_cqp_progress - check cqp processing progress
2715 u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
2720 } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
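The two matched branches are enough to piece the watchdog together: progress on completed_ops resets the stall counter, while a standstill with commands still outstanding increments it. A hedged reconstruction (the count field and the reset branch are assumptions filled in around the matches):

    static void irdma_check_cqp_progress_sketch(struct irdma_cqp_timeout *timeout,
                                                struct irdma_sc_dev *dev)
    {
        u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);

        if (timeout->compl_cqp_cmds != completed_ops) {
            /* hardware made progress: record it, clear the stall count */
            timeout->compl_cqp_cmds = completed_ops;
            timeout->count = 0;
        } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
            /* no progress while commands are still outstanding */
            timeout->count++;
        }
    }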
2726 * irdma_get_cqp_reg_info - get head and tail for cqp using registers
2727 * @cqp: struct for cqp hw
2728 * @val: cqp tail register value
2730 * @error: cqp processing err
2732 static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
2735 *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
2741 * irdma_cqp_poll_registers - poll cqp registers
2742 * @cqp: struct for cqp hw
2746 static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
2753 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
2755 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
2756 ibdev_dbg(to_ibdev(cqp->dev),
2763 IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
2764 atomic64_inc(&cqp->completed_ops);
2767 udelay(cqp->dev->hw_attrs.max_sleep_count);
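The matches at 2746-2767 outline the entire register-polling loop; a hedged reconstruction of its control flow (the loop bound and return codes are assumptions):

    static int irdma_cqp_poll_registers_sketch(struct irdma_sc_cqp *cqp, u32 tail,
                                               u32 count)
    {
        u32 i = 0, newtail, error, val;

        while (i++ < count) {
            irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
            if (error) {
                error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
                return -EIO;
            }
            if (newtail != tail) {
                /* SUCCESS: hardware consumed the posted WQE */
                IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
                atomic64_inc(&cqp->completed_ops);
                return 0;
            }
            udelay(cqp->dev->hw_attrs.max_sleep_count);
        }
        return -ETIMEDOUT;
    }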
3083 * @cqp: IWARP control queue pair pointer
3088 int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
3100 cqp->size = sizeof(*cqp);
3101 cqp->sq_size = info->sq_size;
3102 cqp->hw_sq_size = hw_sq_size;
3103 cqp->sq_base = info->sq;
3104 cqp->host_ctx = info->host_ctx;
3105 cqp->sq_pa = info->sq_pa;
3106 cqp->host_ctx_pa = info->host_ctx_pa;
3107 cqp->dev = info->dev;
3108 cqp->struct_ver = info->struct_ver;
3109 cqp->hw_maj_ver = info->hw_maj_ver;
3110 cqp->hw_min_ver = info->hw_min_ver;
3111 cqp->scratch_array = info->scratch_array;
3112 cqp->polarity = 0;
3113 cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3114 cqp->ena_vf_count = info->ena_vf_count;
3115 cqp->hmc_profile = info->hmc_profile;
3116 cqp->ceqs_per_vf = info->ceqs_per_vf;
3117 cqp->disable_packed = info->disable_packed;
3118 cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3119 cqp->protocol_used = info->protocol_used;
3120 memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3121 info->dev->cqp = cqp;
3123 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
3124 cqp->requested_ops = 0;
3125 atomic64_set(&cqp->completed_ops, 0);
3126 /* for the cqp commands backlog. */
3127 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
3129 writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
3130 writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
3131 writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3133 ibdev_dbg(to_ibdev(cqp->dev),
3134 "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
3135 cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
3136 (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
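The init matches at 3100-3131 are a field-by-field copy from the caller's info struct, followed by ring initialization and zeroing of the tail, doorbell, and status registers. A hedged sketch of how a caller might fill that info (sq_mem, ctx_mem, and scratch_array are hypothetical locals; the sq_size value is a placeholder):

    struct irdma_cqp_init_info info = {};
    int ret;

    info.sq_size = 128;                  /* placeholder; subject to HW limits */
    info.sq = sq_mem.va;                 /* DMA-coherent CQP SQ ring */
    info.sq_pa = sq_mem.pa;
    info.host_ctx = ctx_mem.va;          /* written to HW in irdma_sc_cqp_create() */
    info.host_ctx_pa = ctx_mem.pa;
    info.dev = dev;
    info.scratch_array = scratch_array;  /* one u64 cookie slot per SQ entry */
    info.protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;

    ret = irdma_sc_cqp_init(cqp, &info);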
3141 * irdma_sc_cqp_create - create cqp during bringup
3142 * @cqp: struct for cqp hw
3146 int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
3153 hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
3154 cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
3156 cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
3157 cqp->sdbuf.size, &cqp->sdbuf.pa,
3159 if (!cqp->sdbuf.va)
3162 spin_lock_init(&cqp->dev->cqp_lock);
3164 temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
3165 FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
3166 FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
3167 FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
3170 cqp->rocev2_rto_policy) |
3172 cqp->protocol_used);
3175 set_64bit_val(cqp->host_ctx, 0, temp);
3176 set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
3178 temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
3179 FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
3180 set_64bit_val(cqp->host_ctx, 16, temp);
3181 set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
3182 temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
3183 FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
3185 temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
3186 FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
3188 set_64bit_val(cqp->host_ctx, 32, temp);
3189 set_64bit_val(cqp->host_ctx, 40, 0);
3192 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
3193 FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
3194 FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
3196 set_64bit_val(cqp->host_ctx, 48, temp);
3199 temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
3200 FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
3201 FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
3202 FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
3204 set_64bit_val(cqp->host_ctx, 56, temp);
3206 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
3207 p1 = cqp->host_ctx_pa >> 32;
3208 p2 = (u32)cqp->host_ctx_pa;
3210 writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3211 writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3214 if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3218 udelay(cqp->dev->hw_attrs.max_sleep_count);
3219 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3222 if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
3227 cqp->process_cqp_sds = irdma_update_sds_noccq;
3231 dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3232 cqp->sdbuf.va, cqp->sdbuf.pa);
3233 cqp->sdbuf.va = NULL;
3234 err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3241 * irdma_sc_cqp_post_sq - post to cqp's sq
3242 * @cqp: struct for cqp hw
3244 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
3246 writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
3248 ibdev_dbg(to_ibdev(cqp->dev),
3250 cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
3254 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
3256 * @cqp: CQP HW structure
3260 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
3266 if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
3267 ibdev_dbg(to_ibdev(cqp->dev),
3269 cqp->sq_ring.head, cqp->sq_ring.tail,
3270 cqp->sq_ring.size);
3273 IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
3277 cqp->requested_ops++;
3279 cqp->polarity = !cqp->polarity;
3280 wqe = cqp->sq_base[*wqe_idx].elem;
3281 cqp->scratch_array[*wqe_idx] = scratch;
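The polarity flip at 3279 (guarded in the full source by a wrap-to-zero check that this match list omits) is the classic valid-bit scheme: every lap around the ring inverts the bit producers stamp into WQEVALID, so entries left over from the previous lap can never be mistaken for fresh ones. A toy userspace model of just that mechanism (not driver code):

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_ring {
        uint32_t head, size;
        bool polarity;              /* inverted once per lap */
    };

    /* allocate the next slot and report the valid bit to stamp into it */
    static uint32_t toy_ring_next(struct toy_ring *r, bool *valid_bit)
    {
        uint32_t idx = r->head;

        r->head = (r->head + 1) % r->size;
        if (idx == 0)               /* first slot of a new lap */
            r->polarity = !r->polarity;
        *valid_bit = r->polarity;   /* consumer accepts the entry only when
                                     * this bit matches the lap parity it
                                     * currently expects */
        return idx;
    }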
3288 * irdma_sc_cqp_destroy - destroy cqp during close
3289 * @cqp: struct for cqp hw
3291 int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
3296 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3297 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3299 if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3303 udelay(cqp->dev->hw_attrs.max_sleep_count);
3304 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3305 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3307 dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3308 cqp->sdbuf.va, cqp->sdbuf.pa);
3309 cqp->sdbuf.va = NULL;
3350 struct irdma_sc_cqp *cqp;
3370 cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
3376 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3377 ibdev_dbg(to_ibdev(cqp->dev),
3382 info->scratch = cqp->scratch_array[wqe_idx];
3386 get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
3388 info->cqp = cqp;
3402 IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
3403 atomic64_inc(&cqp->completed_ops);
3410 * @cqp: struct for cqp hw
3411 * @op_code: cqp opcode for completion
3414 int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
3422 ccq = cqp->dev->ccq;
3424 if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
3428 udelay(cqp->dev->hw_attrs.max_sleep_count);
3438 ibdev_dbg(to_ibdev(cqp->dev),
3451 * @cqp: struct for cqp hw
3452 * @scratch: u64 saved to be used during cqp completion
3454 * @post_sq: flag for cqp db to ring
3456 static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
3463 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3479 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3488 irdma_sc_cqp_post_sq(cqp);
3494 * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
3496 * @cqp: struct for cqp hw
3498 static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
3500 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
3505 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
3506 * @cqp: struct for cqp hw
3507 * @scratch: u64 saved to be used during cqp completion
3510 * @post_sq: flag for cqp db to ring
3511 * @wait_type: poll ccq or cqp registers for cqp completion
3513 static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3523 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3532 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3540 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3543 irdma_sc_cqp_post_sq(cqp);
3545 ret_code = irdma_cqp_poll_registers(cqp, tail,
3546 cqp->dev->hw_attrs.max_done_count);
3548 ret_code = irdma_sc_commit_fpm_val_done(cqp);
3555 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
3557 * @cqp: struct for cqp hw
3559 static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
3561 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
3566 * irdma_sc_query_fpm_val - cqp wqe query fpm values
3567 * @cqp: struct for cqp hw
3568 * @scratch: u64 saved to be used during cqp completion
3571 * @post_sq: flag for cqp db to ring
3572 * @wait_type: poll ccq or cqp registers for cqp completion
3574 static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3584 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3592 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3599 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3602 irdma_sc_cqp_post_sq(cqp);
3604 ret_code = irdma_cqp_poll_registers(cqp, tail,
3605 cqp->dev->hw_attrs.max_done_count);
3607 ret_code = irdma_sc_query_fpm_val_done(cqp);
3661 * @scratch: u64 saved to be used during cqp completion
3662 * @post_sq: flag for cqp db to ring
3668 struct irdma_sc_cqp *cqp;
3672 cqp = ceq->dev->cqp;
3673 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3690 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3698 irdma_sc_cqp_post_sq(cqp);
3709 struct irdma_sc_cqp *cqp;
3711 cqp = ceq->dev->cqp;
3712 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
3722 struct irdma_sc_cqp *cqp;
3727 cqp = ceq->dev->cqp;
3728 cqp->process_cqp_sds = irdma_update_sds_noccq;
3730 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
3737 * @scratch: u64 saved to be used during cqp completion
3761 * @scratch: u64 saved to be used during cqp completion
3762 * @post_sq: flag for cqp db to ring
3766 struct irdma_sc_cqp *cqp;
3770 cqp = ceq->dev->cqp;
3771 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3782 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3790 irdma_sc_cqp_post_sq(cqp);
3926 * @scratch: u64 saved to be used during cqp completion
3927 * @post_sq: flag for cqp db to ring
3933 struct irdma_sc_cqp *cqp;
3936 cqp = aeq->dev->cqp;
3937 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3949 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3957 irdma_sc_cqp_post_sq(cqp);
3965 * @scratch: u64 saved to be used during cqp completion
3966 * @post_sq: flag for cqp db to ring
3972 struct irdma_sc_cqp *cqp;
3979 cqp = dev->cqp;
3980 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3988 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3996 irdma_sc_cqp_post_sq(cqp);
4214 * irdma_sc_ccq_create_done - poll cqp for ccq create
4219 struct irdma_sc_cqp *cqp;
4221 cqp = ccq->dev->cqp;
4223 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
4229 * @scratch: u64 saved to be used during cqp completion
4231 * @post_sq: flag for cqp db to ring
4247 ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
4255 * @scratch: u64 saved to be used during cqp completion
4256 * @post_sq: flag for cqp db to ring
4260 struct irdma_sc_cqp *cqp;
4266 cqp = ccq->dev->cqp;
4267 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4283 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4290 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4293 irdma_sc_cqp_post_sq(cqp);
4294 ret_code = irdma_cqp_poll_registers(cqp, tail,
4295 cqp->dev->hw_attrs.max_done_count);
4298 cqp->process_cqp_sds = irdma_update_sds_noccq;
4304 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
4323 ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4339 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
4394 ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4408 * cqp_sds_wqe_fill - fill cqp wqe for sd
4409 * @cqp: struct for cqp hw
4411 * @scratch: u64 saved to be used during cqp completion
4413 static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
4420 struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
4424 wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
4468 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
4487 * irdma_update_pe_sds - cqp wqe for sd
4490 * @scratch: u64 saved to be used during cqp completion
4495 struct irdma_sc_cqp *cqp = dev->cqp;
4498 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4500 irdma_sc_cqp_post_sq(cqp);
4514 struct irdma_sc_cqp *cqp = dev->cqp;
4517 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4521 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4523 irdma_sc_cqp_post_sq(cqp);
4524 return irdma_cqp_poll_registers(cqp, tail,
4525 cqp->dev->hw_attrs.max_done_count);
4529 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
4530 * @cqp: struct for cqp hw
4531 * @scratch: u64 saved to be used during cqp completion
4533 * @post_sq: flag for cqp db to ring
4534 * @poll_registers: flag to poll register for cqp completion
4536 int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
4544 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4553 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4561 irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4564 irdma_sc_cqp_post_sq(cqp);
4566 /* check for cqp sq tail update */
4567 return irdma_cqp_poll_registers(cqp, tail,
4568 cqp->dev->hw_attrs.max_done_count);
4570 return irdma_sc_poll_for_cqp_op_done(cqp,
4579 * irdma_cqp_ring_full - check if cqp ring is full
4580 * @cqp: struct for cqp hw
4582 static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
4584 return IRDMA_RING_FULL_ERR(cqp->sq_ring);
4618 * irdma_sc_query_rdma_features_done - poll cqp for query features done
4619 * @cqp: struct for cqp hw
4621 static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
4623 return irdma_sc_poll_for_cqp_op_done(cqp,
4630 * @cqp: struct for cqp hw
4632 * @scratch: u64 saved to be used during cqp completion
4634 static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
4640 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4648 cqp->polarity) |
4657 irdma_sc_cqp_post_sq(cqp);
4680 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4682 ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4705 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4707 ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4749 if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
4906 if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
4977 * irdma_exec_cqp_cmd - execute cqp cmd when wqes are available
4979 * @pcmdinfo: cqp command info
5047 status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
5060 irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
5066 status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
5071 status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
5078 status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
5086 status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
5097 status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
5103 status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
5108 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5114 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5120 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5126 status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
5131 status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
5136 status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
5142 status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
5148 status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
5197 status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
5203 status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
5208 status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
5214 status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
5221 status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
5226 status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
5231 status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
5236 status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
5241 status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
5254 * irdma_process_cqp_cmd - process all cqp commands
5256 * @pcmdinfo: cqp command info
5265 if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
5274 * irdma_process_bh - called from tasklet for cqp list
5285 !irdma_cqp_ring_full(dev->cqp)) {
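The final matches show both halves of the backlog scheme: irdma_process_cqp_cmd executes a command immediately only when no backlog exists and the ring has room, otherwise it queues it on cqp_cmd_head, and irdma_process_bh later drains that list under the same ring-full check. A hedged reconstruction (the locking discipline and the list-entry field name are assumptions):

    int irdma_process_cqp_cmd_sketch(struct irdma_sc_dev *dev,
                                     struct cqp_cmds_info *pcmdinfo)
    {
        int status = 0;
        unsigned long flags;

        spin_lock_irqsave(&dev->cqp_lock, flags);
        if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
            status = irdma_exec_cqp_cmd(dev, pcmdinfo);
        else
            /* ring busy or backlog pending: defer to the tasklet */
            list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
        spin_unlock_irqrestore(&dev->cqp_lock, flags);
        return status;
    }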