Lines Matching defs:info
178 * @info: arp entry information
183 struct irdma_add_arp_cache_entry_info *info,
192 set_64bit_val(wqe, 8, info->reach_max);
193 set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
195 hdr = info->arp_index |
197 FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
248 * @info: info for apbvt entry to add or delete
253 struct irdma_apbvt_info *info,
263 set_64bit_val(wqe, 16, info->port);
266 FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
283 * @info: info for quad hash to manage
293 * and quad is passed in info.
301 struct irdma_qhash_table_info *info,
308 struct irdma_sc_vsi *vsi = info->vsi;
314 set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
316 qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
317 FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
318 if (info->ipv4_valid) {
320 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
323 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
324 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
327 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
328 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
331 vsi->qos[info->user_pri].qs_handle);
332 if (info->vlan_valid)
333 qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
335 if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
336 qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
337 if (!info->ipv4_valid) {
339 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
340 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
342 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
343 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
346 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
354 FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
355 FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
356 FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
357 FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
373 * @info: initialization qp info
375 int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
381 if (info->qp_uk_init_info.max_sq_frag_cnt >
382 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
383 info->qp_uk_init_info.max_rq_frag_cnt >
384 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
387 qp->dev = info->pd->dev;
388 qp->vsi = info->vsi;
389 qp->ieq_qp = info->vsi->exception_lan_q;
390 qp->sq_pa = info->sq_pa;
391 qp->rq_pa = info->rq_pa;
392 qp->hw_host_ctx_pa = info->host_ctx_pa;
393 qp->q2_pa = info->q2_pa;
394 qp->shadow_area_pa = info->shadow_area_pa;
395 qp->q2_buf = info->q2;
396 qp->pd = info->pd;
397 qp->hw_host_ctx = info->host_ctx;
398 info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
399 ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
403 qp->virtual_map = info->virtual_map;
404 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
406 if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
407 (info->virtual_map && info->rq_pa >= pble_obj_cnt))
429 qp->sq_tph_val = info->sq_tph_val;
430 qp->rq_tph_val = info->rq_tph_val;
431 qp->sq_tph_en = info->sq_tph_en;
432 qp->rq_tph_en = info->rq_tph_en;
433 qp->rcv_tph_en = info->rcv_tph_en;
434 qp->xmit_tph_en = info->xmit_tph_en;
435 qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
444 * @info: qp create info
448 int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
469 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
470 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
471 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
474 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
475 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
477 info->arp_cache_idx_valid) |
478 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
495 * @info: modify qp info
499 int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
513 if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
514 if (info->dont_send_fin)
516 if (info->dont_send_term)
520 term_len = info->termlen;
524 FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
531 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
532 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
534 info->cached_var_valid) |
536 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
537 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
538 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
540 FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
542 info->remove_hash_idx) |
544 FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
546 info->arp_cache_idx_valid) |
547 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
635 * @info: ctx info
638 struct irdma_qp_host_ctx_info *info)
645 roce_info = info->roce_info;
646 udp = info->udp_info;
647 qp->user_pri = info->user_pri;
715 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
716 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
718 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
725 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
735 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
750 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
791 * @info: mac addr info
796 struct irdma_local_mac_entry_info *info,
806 set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
808 header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
867 * @info: ctx info
870 struct irdma_qp_host_ctx_info *info)
880 iw = info->iwarp_info;
881 tcp = info->tcp_info;
887 qp->user_pri = info->user_pri;
912 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
913 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
915 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
921 if (info->iwarp_info_valid) {
935 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
951 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
960 if (info->tcp_info_valid) {
1036 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
1051 * @info: stag info
1056 struct irdma_allocate_stag_info *info,
1064 if (!info->total_len && !info->all_memory)
1067 if (info->page_size == 0x40000000)
1069 else if (info->page_size == 0x200000)
1080 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
1081 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
1083 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1085 FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
1087 if (info->chunk_size)
1089 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
1093 FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1094 FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1096 FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
1097 FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1098 FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1115 * @info: mr info
1120 struct irdma_reg_ns_stag_info *info,
1132 if (!info->total_len && !info->all_memory)
1135 if (info->page_size == 0x40000000)
1137 else if (info->page_size == 0x200000)
1139 else if (info->page_size == 0x1000)
1144 if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
1151 if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
1158 fbo = info->va & (info->page_size - 1);
1161 (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
1162 info->va : fbo));
1164 FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
1165 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1167 FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
1168 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1169 if (!info->chunk_size) {
1170 set_64bit_val(wqe, 32, info->reg_addr_pa);
1175 FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
1177 set_64bit_val(wqe, 40, info->hmc_fcn_index);
1180 addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
1183 FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1185 FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1188 FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1189 FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1206 * @info: dealloc stag info
1211 struct irdma_dealloc_stag_info *info,
1224 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1226 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1229 FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
1246 * @info: memory window allocation information
1251 struct irdma_mw_alloc_info *info, u64 scratch,
1264 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1266 FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
1269 FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
1271 info->mw1_bind_dont_vldt_key) |
1288 * @info: fast mr info
1292 struct irdma_fast_reg_stag_info *info,
1301 if (info->page_size == 0x40000000)
1303 else if (info->page_size == 0x200000)
1308 sq_info.wr_id = info->wr_id;
1309 sq_info.signaled = info->signaled;
1320 info->wr_id, wqe_idx,
1323 temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
1324 (uintptr_t)info->va : info->fbo;
1328 info->first_pm_pbl_index >> 16);
1331 FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
1333 info->total_len |
1334 FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
1336 hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
1337 FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
1339 FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
1341 FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
1342 FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
1343 FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
1344 FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
1345 FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
1490 * @info: aeq info for the packet
1493 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
1498 if (info->q2_data_written) {
1579 * @info: the struct containing AE information
1582 struct irdma_aeqe_info *info)
1593 if (info->q2_data_written) {
1598 opcode = irdma_iwarp_opcode(info, pkt);
1600 qp->sq_flush_code = info->sq;
1601 qp->rq_flush_code = info->rq;
1603 switch (info->ae_id) {
1617 if (info->q2_data_written)
1768 * @info: the struct containing AE information
1771 struct irdma_aeqe_info *info)
1778 termlen = irdma_bld_terminate_hdr(qp, info);
1788 * @info: the struct containing AE information
1791 struct irdma_aeqe_info *info)
1801 if (info->q2_data_written) {
1818 info->ae_id = aeq_id;
1819 if (info->ae_id) {
1821 irdma_terminate_connection(qp, info);
1856 * @info: the info used to initialize the vsi struct
1859 struct irdma_vsi_init_info *info)
1863 vsi->dev = info->dev;
1864 vsi->back_vsi = info->back_vsi;
1865 vsi->register_qset = info->register_qset;
1866 vsi->unregister_qset = info->unregister_qset;
1867 vsi->mtu = info->params->mtu;
1868 vsi->exception_lan_q = info->exception_lan_q;
1869 vsi->vsi_idx = info->pf_data_vsi_num;
1871 irdma_set_qos_info(vsi, info->params);
1943 * @info: The info structure used for initialization
1946 struct irdma_vsi_stats_info *info)
1950 vsi->pestat = info->pestat;
1970 vsi->stats_idx = info->fcn_id;
1971 if (info->alloc_stats_inst) {
2044 * @info: gather stats info structure
2048 struct irdma_stats_gather_info *info,
2054 if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
2062 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
2063 set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
2066 FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
2068 info->stats_inst_index) |
2070 info->use_hmc_fcn_index) |
2090 * @info: stats info structure
2095 struct irdma_stats_inst_info *info,
2106 FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
2110 info->use_hmc_fcn_index) |
2111 FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
2128 * @info: User priority map info
2132 struct irdma_up_info *info, u64 scratch)
2143 temp |= (u64)info->map[i] << (i * 8);
2147 FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
2148 FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
2151 FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
2153 info->use_cnp_up_override) |
2169 * @info: node info structure
2174 struct irdma_ws_node_info *info,
2185 FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
2186 FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
2190 FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
2191 FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
2192 FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
2193 FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
2195 FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
2196 FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
2211 * @info: flush information
2216 struct irdma_qp_flush_info *info, u64 scratch,
2225 if (info->rq && !qp->flush_rq)
2227 if (info->sq && !qp->flush_sq)
2244 if (info->userflushcode) {
2247 info->rq_minor_code) |
2249 info->rq_major_code);
2252 info->sq_minor_code) |
2254 info->sq_major_code);
2258 temp = (info->generate_ae) ?
2259 info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2260 info->ae_src) : 0;
2265 FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
2266 FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
2285 * @info: gen ae information
2290 struct irdma_gen_ae_info *info, u64 scratch,
2303 temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2304 info->ae_src);
2325 * @info: upload context info ptr for return
2330 struct irdma_upload_context_info *info,
2342 set_64bit_val(wqe, 16, info->buf_pa);
2344 hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
2346 FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
2347 FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
2348 FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
2365 * @info: push page info
2370 struct irdma_cqp_manage_push_page_info *info,
2376 if (info->free_page &&
2377 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2384 set_64bit_val(wqe, 16, info->qs_handle);
2385 hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
2386 FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
2389 FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
2477 * @info: cq initialization info
2479 int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
2483 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2484 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
2487 cq->cq_pa = info->cq_base_pa;
2488 cq->dev = info->dev;
2489 cq->ceq_id = info->ceq_id;
2490 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
2491 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
2492 irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
2494 cq->virtual_map = info->virtual_map;
2495 cq->pbl_chunk_size = info->pbl_chunk_size;
2496 cq->ceqe_mask = info->ceqe_mask;
2497 cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
2498 cq->shadow_area_pa = info->shadow_area_pa;
2499 cq->shadow_read_threshold = info->shadow_read_threshold;
2500 cq->ceq_id_valid = info->ceq_id_valid;
2501 cq->tph_en = info->tph_en;
2502 cq->tph_val = info->tph_val;
2503 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2504 cq->vsi = info->vsi;
2635 * irdma_sc_cq_resize - set resized cq buffer info
2637 * @info: resized cq buffer info
2639 void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
2641 cq->virtual_map = info->virtual_map;
2642 cq->cq_pa = info->cq_pa;
2643 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2644 cq->pbl_chunk_size = info->pbl_chunk_size;
2645 irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
2651 * @info: modification info struct
2656 struct irdma_modify_cq_info *info, u64 scratch,
2665 if (info->cq_resize && info->virtual_map &&
2666 info->first_pm_pbl_idx >= pble_obj_cnt)
2674 set_64bit_val(wqe, 0, info->cq_size);
2677 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
2678 set_64bit_val(wqe, 32, info->cq_pa);
2680 set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
2687 FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
2688 FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
2689 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
2690 FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
2710 * @timeout: timeout info struct
2778 * @obj_info: object info pointer
2813 * @info: ptr to irdma_hmc_obj_info struct
2816 * parses fpm commit info and copies base value
2821 struct irdma_hmc_obj_info *info, u32 *sd)
2828 irdma_sc_decode_fpm_commit(dev, buf, 0, info,
2830 irdma_sc_decode_fpm_commit(dev, buf, 8, info,
2833 irdma_sc_decode_fpm_commit(dev, buf, 24, info,
2835 irdma_sc_decode_fpm_commit(dev, buf, 32, info,
2837 irdma_sc_decode_fpm_commit(dev, buf, 40, info,
2839 irdma_sc_decode_fpm_commit(dev, buf, 48, info,
2841 irdma_sc_decode_fpm_commit(dev, buf, 56, info,
2843 irdma_sc_decode_fpm_commit(dev, buf, 64, info,
2845 irdma_sc_decode_fpm_commit(dev, buf, 72, info,
2847 irdma_sc_decode_fpm_commit(dev, buf, 80, info,
2849 irdma_sc_decode_fpm_commit(dev, buf, 88, info,
2851 irdma_sc_decode_fpm_commit(dev, buf, 112, info,
2855 irdma_sc_decode_fpm_commit(dev, buf, 96, info,
2857 irdma_sc_decode_fpm_commit(dev, buf, 104, info,
2859 irdma_sc_decode_fpm_commit(dev, buf, 128, info,
2861 irdma_sc_decode_fpm_commit(dev, buf, 136, info,
2863 irdma_sc_decode_fpm_commit(dev, buf, 144, info,
2865 irdma_sc_decode_fpm_commit(dev, buf, 152, info,
2867 irdma_sc_decode_fpm_commit(dev, buf, 160, info,
2869 irdma_sc_decode_fpm_commit(dev, buf, 168, info,
2875 if (info[i].base > max_base) {
2876 max_base = info[i].base;
2881 size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
2882 info[last_hmc_obj].base;
2896 * @rsrc_idx: resource index into info
3084 * @info: IWARP control queue pair init info pointer
3089 struct irdma_cqp_init_info *info)
3093 if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
3094 info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
3095 ((info->sq_size & (info->sq_size - 1))))
3098 hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
3101 cqp->sq_size = info->sq_size;
3103 cqp->sq_base = info->sq;
3104 cqp->host_ctx = info->host_ctx;
3105 cqp->sq_pa = info->sq_pa;
3106 cqp->host_ctx_pa = info->host_ctx_pa;
3107 cqp->dev = info->dev;
3108 cqp->struct_ver = info->struct_ver;
3109 cqp->hw_maj_ver = info->hw_maj_ver;
3110 cqp->hw_min_ver = info->hw_min_ver;
3111 cqp->scratch_array = info->scratch_array;
3113 cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3114 cqp->ena_vf_count = info->ena_vf_count;
3115 cqp->hmc_profile = info->hmc_profile;
3116 cqp->ceqs_per_vf = info->ceqs_per_vf;
3117 cqp->disable_packed = info->disable_packed;
3118 cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3119 cqp->protocol_used = info->protocol_used;
3120 memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3121 info->dev->cqp = cqp;
3343 * @info: completion q entry to return
3346 struct irdma_ccq_cqe_info *info)
3371 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
3372 info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
3373 info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
3374 if (info->error) {
3375 info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
3382 info->scratch = cqp->scratch_array[wqe_idx];
3385 info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
3387 info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
3388 info->cqp = cqp;
3417 struct irdma_ccq_cqe_info info = {};
3427 if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
3431 if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
3436 if (op_code == info.op_code)
3440 op_code, info.op_code);
3444 memcpy(compl_info, &info, sizeof(*compl_info));
3453 * @info: info for the manage function table operation
3457 struct irdma_hmc_fcn_info *info,
3475 hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
3478 FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
3616 * @info: ceq initialization info
3619 struct irdma_ceq_init_info *info)
3623 if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
3624 info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
3627 if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
3629 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3631 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3635 ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
3636 ceq->ceq_id = info->ceq_id;
3637 ceq->dev = info->dev;
3638 ceq->elem_cnt = info->elem_cnt;
3639 ceq->ceq_elem_pa = info->ceqe_pa;
3640 ceq->virtual_map = info->virtual_map;
3641 ceq->itr_no_expire = info->itr_no_expire;
3642 ceq->reg_cq = info->reg_cq;
3645 ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
3646 ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
3647 ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
3648 ceq->tph_en = info->tph_en;
3649 ceq->tph_val = info->tph_val;
3650 ceq->vsi = info->vsi;
3653 ceq->dev->ceq[info->ceq_id] = ceq;
3890 * @info: aeq initialization info
3893 struct irdma_aeq_init_info *info)
3897 if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
3898 info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
3901 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3903 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3908 aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
3909 aeq->dev = info->dev;
3910 aeq->elem_cnt = info->elem_cnt;
3911 aeq->aeq_elem_pa = info->aeq_elem_pa;
3913 aeq->virtual_map = info->virtual_map;
3914 aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
3915 aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
3916 aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
3917 aeq->msix_idx = info->msix_idx;
3918 info->dev->aeq = aeq;
4003 * @info: aeqe info to be returned
4006 struct irdma_aeqe_info *info)
4029 info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
4030 info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
4032 info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
4033 info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
4034 info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
4035 info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
4036 info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
4038 info->ae_src = ae_src;
4039 switch (info->ae_id) {
4080 info->qp = true;
4081 info->compl_ctx = compl_ctx;
4084 info->cq = true;
4085 info->compl_ctx = compl_ctx << 1;
4100 info->qp = true;
4101 info->compl_ctx = compl_ctx;
4110 info->qp = true;
4111 info->rq = true;
4112 info->compl_ctx = compl_ctx;
4118 info->cq = true;
4119 info->compl_ctx = compl_ctx << 1;
4123 info->qp = true;
4124 info->sq = true;
4125 info->compl_ctx = compl_ctx;
4129 info->qp = true;
4130 info->compl_ctx = compl_ctx;
4131 info->in_rdrsp_wr = true;
4135 info->qp = true;
4136 info->compl_ctx = compl_ctx;
4137 info->out_rdrsp = true;
4164 * @info: info for control cq initialization
4166 int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
4170 if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
4171 info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
4174 if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
4177 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4179 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4182 cq->cq_pa = info->cq_pa;
4183 cq->cq_uk.cq_base = info->cq_base;
4184 cq->shadow_area_pa = info->shadow_area_pa;
4185 cq->cq_uk.shadow_area = info->shadow_area;
4186 cq->shadow_read_threshold = info->shadow_read_threshold;
4187 cq->dev = info->dev;
4188 cq->ceq_id = info->ceq_id;
4189 cq->cq_uk.cq_size = info->num_elem;
4191 cq->ceqe_mask = info->ceqe_mask;
4192 IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
4194 cq->ceq_id_valid = info->ceq_id_valid;
4195 cq->tph_en = info->tph_en;
4196 cq->tph_val = info->tph_val;
4197 cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
4198 cq->pbl_list = info->pbl_list;
4199 cq->virtual_map = info->virtual_map;
4200 cq->pbl_chunk_size = info->pbl_chunk_size;
4201 cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
4203 cq->vsi = info->vsi;
4209 info->dev->ccq = cq;
4328 /* parse the fpm_query_buf and fill hmc obj info */
4410 * @info: sd info for wqe
4414 struct irdma_update_sds_info *info, u64 scratch)
4428 wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
4429 mem_entries = info->cnt - wqe_entries;
4433 memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
4439 data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
4445 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
4448 set_64bit_val(wqe, 56, info->entry[2].data);
4452 (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
4455 set_64bit_val(wqe, 40, info->entry[1].data);
4459 FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
4461 set_64bit_val(wqe, 8, info->entry[0].data);
4489 * @info: sd info for sd entries
4493 struct irdma_update_sds_info *info, u64 scratch)
4498 ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4508 * @info: sd info for sd entries
4511 struct irdma_update_sds_info *info)
4517 ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4631 * @buf: buffer to hold query info
4979 * @pcmdinfo: cqp command info
5013 &pcmdinfo->in.u.qp_upload_context.info,
5025 &pcmdinfo->in.u.cq_modify.info,
5036 &pcmdinfo->in.u.qp_flush_wqes.info,
5042 &pcmdinfo->in.u.gen_ae.info,
5048 &pcmdinfo->in.u.manage_push_page.info,
5054 &pcmdinfo->in.u.update_pe_sds.info,
5061 &pcmdinfo->in.u.manage_hmc_pm.info,
5098 &pcmdinfo->in.u.stats_manage.info,
5104 &pcmdinfo->in.u.stats_gather.info,
5109 &pcmdinfo->in.u.ws_node.info,
5115 &pcmdinfo->in.u.ws_node.info,
5121 &pcmdinfo->in.u.ws_node.info,
5127 &pcmdinfo->in.u.up_map.info,
5143 &pcmdinfo->in.u.manage_apbvt_entry.info,
5149 &pcmdinfo->in.u.manage_qhash_table_entry.info,
5155 &pcmdinfo->in.u.qp_modify.info,
5161 &pcmdinfo->in.u.qp_create.info,
5174 &pcmdinfo->in.u.alloc_stag.info,
5180 &pcmdinfo->in.u.mr_reg_non_shared.info,
5186 &pcmdinfo->in.u.dealloc_stag.info,
5192 &pcmdinfo->in.u.mw_alloc.info,
5198 &pcmdinfo->in.u.add_arp_cache_entry.info,
5209 &pcmdinfo->in.u.add_local_mac_entry.info,
5222 &pcmdinfo->in.u.ah_create.info,
5227 &pcmdinfo->in.u.ah_destroy.info,
5232 &pcmdinfo->in.u.mc_create.info,
5237 &pcmdinfo->in.u.mc_destroy.info,
5242 &pcmdinfo->in.u.mc_modify.info,
5256 * @pcmdinfo: cqp command info
5366 * @info: Device init info
5369 struct irdma_device_init_info *info)
5377 dev->hmc_fn_id = info->hmc_fn_id;
5378 dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5379 dev->fpm_query_buf = info->fpm_query_buf;
5380 dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5381 dev->fpm_commit_buf = info->fpm_commit_buf;
5382 dev->hw = info->hw;
5383 dev->hw->hw_addr = info->bar0;