Lines Matching defs:cdev

211 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
244 csk->cdev->ports[csk->port_id],
269 csk->cdev->ports[csk->port_id],
297 csk->cdev->ports[csk->port_id],
323 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
330 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
373 csk->cdev->ports[csk->port_id],
396 csk->cdev->ports[csk->port_id],
421 csk->cdev->ports[csk->port_id],
437 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
471 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
479 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
506 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
523 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
553 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
631 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
649 if (csk->cdev->skb_iso_txhdr)
673 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
717 struct cxgbi_device *cdev = csk->cdev;
718 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
875 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
882 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
891 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
898 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
904 pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
918 module_put(cdev->owner);
992 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
1043 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1051 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1067 module_put(cdev->owner);
1092 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
1097 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1113 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1118 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1152 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1157 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1200 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1205 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1224 static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1229 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1245 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1251 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1350 static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1354 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1445 static void do_rx_data_ddp(struct cxgbi_device *cdev,
1452 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1512 do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1516 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1603 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1608 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1624 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1628 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1689 struct net_device *ndev = csk->cdev->ports[csk->port_id];
1713 lldi = cxgbi_cdev_priv(csk->cdev);
1768 struct cxgbi_device *cdev = csk->cdev;
1769 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1770 struct net_device *ndev = cdev->ports[csk->port_id];
1866 rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
1867 cdev->rxq_idx_cntr++;
1897 if (!try_module_get(cdev->owner)) {
1944 static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1951 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1956 cdev->csk_release_offload_resources = release_offload_resources;
1957 cdev->csk_push_tx_frames = push_tx_frames;
1958 cdev->csk_send_abort_req = send_abort_req;
1959 cdev->csk_send_close_req = send_close_req;
1960 cdev->csk_send_rx_credits = send_rx_credits;
1961 cdev->csk_alloc_cpls = alloc_cpls;
1962 cdev->csk_init_act_open = init_act_open;
1964 pr_info("cdev 0x%p, offload up, added.\n", cdev);
1969 ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1975 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1993 ddp_ppod_init_idata(struct cxgbi_device *cdev,
2010 ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
2022 struct cxgbi_device *cdev = csk->cdev;
2023 struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
2104 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2140 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2146 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2149 (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
2152 static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
2154 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
2155 struct net_device *ndev = cdev->ports[0];
2164 cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
2175 err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
2184 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2185 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2186 cdev->csk_ddp_set_map = ddp_set_map;
2187 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2189 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2191 cdev->cdev2ppm = cdev2ppm;
2213 struct cxgbi_device *cdev;
2222 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
2223 if (!cdev) {
2228 cdev, lldi->adapter_type, lldi->nports,
2234 cdev, i, lldi->rxq_ids[i]);
2236 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
2237 cdev->flags = CXGBI_FLAG_DEV_T4;
2238 cdev->pdev = lldi->pdev;
2239 cdev->ports = lldi->ports;
2240 cdev->nports = lldi->nports;
2241 cdev->mtus = lldi->mtus;
2242 cdev->nmtus = NMTUS;
2243 cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2245 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
2246 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
2247 cdev->itp = &cxgb4i_iscsi_transport;
2248 cdev->owner = THIS_MODULE;
2250 cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
2251 pr_info("cdev 0x%p,%s, pfvf %u.\n",
2252 cdev, lldi->ports[0]->name, cdev->pfvf);
2254 rc = cxgb4i_ddp_init(cdev);
2256 pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
2260 ndev = cdev->ports[0];
2268 cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
2272 ndev->name, cdev, t->ntids);
2275 pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
2281 !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
2282 cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);
2284 rc = cxgb4i_ofld_init(cdev);
2286 pr_info("t4 0x%p ofld init failed.\n", cdev);
2291 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
2296 for (i = 0; i < cdev->nports; i++) {
2298 cdev->hbas[i]->port_id = pi->port_id;
2300 return cdev;
2303 cxgbi_device_unregister(cdev);
2314 struct cxgbi_device *cdev = handle;
2339 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
2340 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
2345 cxgb4i_cplhandlers[opc](cdev, skb);
2355 struct cxgbi_device *cdev = handle;
2359 pr_info("cdev 0x%p, UP.\n", cdev);
2362 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2366 pr_info("cdev 0x%p, DOWN.\n", cdev);
2369 pr_info("cdev 0x%p, DETACH.\n", cdev);
2370 cxgbi_device_unregister(cdev);
2373 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
2386 struct cxgbi_device *cdev = NULL;
2419 cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
2422 if (!cdev)
2425 pmap = &cdev->pmap;
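
The hits around 1951-1964 above all fall inside cxgb4i_ofld_init() and, taken together, show the pattern the driver uses to hand its connection-level operations to the generic libcxgbi layer: reserve a source-port map, then fill the csk_* callback slots on the cxgbi_device. The sketch below reassembles those hits into one function purely for readability; it assumes the usual cxgb4i/libcxgbi headers, and the second portmap argument (cxgb4i_max_connect) is not visible in the listing and is assumed. It illustrates the wiring, it is not a verbatim copy of the driver.

/*
 * Sketch only: how an offload driver such as cxgb4i wires its
 * per-connection callbacks into struct cxgbi_device, mirroring the
 * assignments at listing lines 1956-1962. Assumes the cxgbi/cxgb4
 * headers; the cxgb4i_max_connect bound is an assumption.
 */
static int ofld_init_sketch(struct cxgbi_device *cdev)
{
	int rc;

	/* reserve the TCP source-port range used for offloaded connections */
	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	/* connection-level hooks that libcxgbi calls back into */
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}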