Lines Matching defs:cdev
210 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
243 csk->cdev->ports[csk->port_id],
268 csk->cdev->ports[csk->port_id],
296 csk->cdev->ports[csk->port_id],
322 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
329 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
372 csk->cdev->ports[csk->port_id],
395 csk->cdev->ports[csk->port_id],
420 csk->cdev->ports[csk->port_id],
436 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
470 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
478 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
505 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
522 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
552 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
630 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
648 if (csk->cdev->skb_iso_txhdr)
672 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
716 struct cxgbi_device *cdev = csk->cdev;
717 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
874 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
881 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
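The entries above (roughly 210–881) are the connection-management senders: active-open, close, abort, rx-credit, flowc and tx-data WRs. They share one pattern: recover the adapter's cxgb4_lld_info from the cdev private area with cxgbi_cdev_priv(), then hand the WR to the egress port csk->cdev->ports[csk->port_id], either through the L2T entry (cxgb4_l2t_send) or straight to the offload queue (cxgb4_ofld_send). A minimal sketch of that shared pattern, assuming the driver's own headers (cxgb4i.h / libcxgbi.h); fill_example_wr() is a hypothetical placeholder, only the priv/send calls are the ones from the listing:

	/* Sketch of the TX pattern shared by the send_* helpers above.
	 * fill_example_wr() is hypothetical; cxgbi_cdev_priv(),
	 * cxgb4_l2t_send() and cxgb4_ofld_send() are the calls from the listing.
	 */
	static int example_send_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   bool via_l2t)
	{
		struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
		struct net_device *ndev = csk->cdev->ports[csk->port_id];

		fill_example_wr(skb, lldi);	/* hypothetical WR/CPL setup */

		if (via_l2t)
			/* connection-scoped WRs go out via the L2T entry */
			return cxgb4_l2t_send(ndev, skb, csk->l2t);
		/* others (rx credits, flowc, abort rpl) use the plain offload queue */
		return cxgb4_ofld_send(ndev, skb);
	}
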
890 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
897 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
903 pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
917 module_put(cdev->owner);
991 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
1042 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1050 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1066 module_put(cdev->owner);
1091 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
1096 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1112 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1117 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1151 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1156 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1199 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1204 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1223 static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1228 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1244 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1250 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1349 static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1353 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1444 static void do_rx_data_ddp(struct cxgbi_device *cdev,
1451 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1511 do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1515 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1602 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1607 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1623 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1627 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
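From do_act_establish() (890) down to do_set_tcb_rpl() (1623), the entries are the per-opcode CPL reply handlers. They all follow the same shape: take the cxgbi_device and the received skb, pull the lld_info out of the cdev, recover the tid from the CPL header, look up the owning cxgbi_sock and hand the event to libcxgbi. A sketch of that shape, assuming the cxgb4 ULD helpers GET_TID()/lookup_tid() and the libcxgbi cxgbi_sock_rcv_peer_close() entry point; the concrete CPL type is only illustrative:

	/* Sketch of the common do_*() handler shape; the peer-close CPL is
	 * used only as an example of the tid lookup + libcxgbi hand-off.
	 */
	static void example_do_cpl(struct cxgbi_device *cdev, struct sk_buff *skb)
	{
		struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
		struct cpl_peer_close *rpl = (struct cpl_peer_close *)skb->data;
		unsigned int tid = GET_TID(rpl);
		struct cxgbi_sock *csk;

		csk = lookup_tid(lldi->tids, tid);
		if (unlikely(!csk))
			pr_err("can't find connection for tid %u.\n", tid);
		else
			cxgbi_sock_rcv_peer_close(csk);	/* generic state machine */
		__kfree_skb(skb);
	}
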
1688 struct net_device *ndev = csk->cdev->ports[csk->port_id];
1712 lldi = cxgbi_cdev_priv(csk->cdev);
1767 struct cxgbi_device *cdev = csk->cdev;
1768 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1769 struct net_device *ndev = cdev->ports[csk->port_id];
1865 rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
1866 cdev->rxq_idx_cntr++;
1896 if (!try_module_get(cdev->owner)) {
1943 static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1950 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1955 cdev->csk_release_offload_resources = release_offload_resources;
1956 cdev->csk_push_tx_frames = push_tx_frames;
1957 cdev->csk_send_abort_req = send_abort_req;
1958 cdev->csk_send_close_req = send_close_req;
1959 cdev->csk_send_rx_credits = send_rx_credits;
1960 cdev->csk_alloc_cpls = alloc_cpls;
1961 cdev->csk_init_act_open = init_act_open;
1963 pr_info("cdev 0x%p, offload up, added.\n", cdev);
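The block at 1943–1963 is cxgb4i_ofld_init(): after creating the source-port map it publishes the hardware-specific hooks on the generic cxgbi_device, which is how libcxgbi drives connections without knowing the T4 details. A condensed sketch under those assumptions (error handling and remaining fields trimmed; cxgb4i_max_connect is assumed to be the second portmap argument):

	/* Condensed sketch of the callback wiring in cxgb4i_ofld_init(). */
	static int example_ofld_init(struct cxgbi_device *cdev)
	{
		int rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
						     cxgb4i_max_connect);
		if (rc < 0)
			return rc;

		cdev->csk_release_offload_resources = release_offload_resources;
		cdev->csk_push_tx_frames = push_tx_frames;
		cdev->csk_send_abort_req = send_abort_req;
		cdev->csk_send_close_req = send_close_req;
		cdev->csk_send_rx_credits = send_rx_credits;
		cdev->csk_alloc_cpls = alloc_cpls;
		cdev->csk_init_act_open = init_act_open;

		pr_info("cdev 0x%p, offload up, added.\n", cdev);
		return 0;
	}
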
1968 ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1974 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1992 ddp_ppod_init_idata(struct cxgbi_device *cdev,
2009 ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
2021 struct cxgbi_device *cdev = csk->cdev;
2022 struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
2103 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2139 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2145 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2148 (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
2151 static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
2153 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
2154 struct net_device *ndev = cdev->ports[0];
2163 cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
2174 err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
2183 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2184 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2185 cdev->csk_ddp_set_map = ddp_set_map;
2186 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2188 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2190 cdev->cdev2ppm = cdev2ppm;
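The 2145–2190 entries belong to the DDP side: cxgb4i_ddp_init() attaches the page-pod manager via cxgbi_ddp_ppm_setup(), publishes the DDP callbacks, and clamps the PDU payload limits. A reduced sketch; the iscsi_iolen/ISCSI_PDU_NONPAYLOAD_LEN term completing the truncated min_t() lines is an assumption, and the tag-format setup and error paths are omitted:

	/* Reduced sketch of cxgb4i_ddp_init(): publish the DDP hooks and size
	 * limits on the cdev.  The second min_t() argument is assumed; the
	 * listing only shows the ULP2_MAX_PDU_PAYLOAD half of the clamp.
	 */
	static int example_ddp_init(struct cxgbi_device *cdev)
	{
		struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);

		cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;

		cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
		cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
		cdev->csk_ddp_set_map = ddp_set_map;
		cdev->cdev2ppm = cdev2ppm;

		/* payload bounded by both ULP2 and the adapter's iSCSI I/O length */
		cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
					  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
		cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
					  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
		return 0;
	}
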
2212 struct cxgbi_device *cdev;
2221 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
2222 if (!cdev) {
2227 cdev, lldi->adapter_type, lldi->nports,
2233 cdev, i, lldi->rxq_ids[i]);
2235 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
2236 cdev->flags = CXGBI_FLAG_DEV_T4;
2237 cdev->pdev = lldi->pdev;
2238 cdev->ports = lldi->ports;
2239 cdev->nports = lldi->nports;
2240 cdev->mtus = lldi->mtus;
2241 cdev->nmtus = NMTUS;
2242 cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2244 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
2245 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
2246 cdev->itp = &cxgb4i_iscsi_transport;
2247 cdev->owner = THIS_MODULE;
2249 cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
2250 pr_info("cdev 0x%p,%s, pfvf %u.\n",
2251 cdev, lldi->ports[0]->name, cdev->pfvf);
2253 rc = cxgb4i_ddp_init(cdev);
2255 pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
2259 ndev = cdev->ports[0];
2267 cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
2271 ndev->name, cdev, t->ntids);
2274 pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
2280 !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
2281 cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);
2283 rc = cxgb4i_ofld_init(cdev);
2285 pr_info("t4 0x%p ofld init failed.\n", cdev);
2290 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
2295 for (i = 0; i < cdev->nports; i++) {
2297 cdev->hbas[i]->port_id = pi->port_id;
2299 return cdev;
2302 cxgbi_device_unregister(cdev);
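Everything from 2212 to 2302 is the ULD probe path: register a cxgbi_device with room for a private copy of the lld_info, fill the cdev fields from the lldi, run DDP and offload init, add the iSCSI HBAs, and unregister on failure. A condensed sketch under those assumptions; cxgb4i_host_template/cxgb4i_stt and the CXGB4I_MAX_CONN bound stand in for the driver's actual template symbols and the max_conn value it derives from the adapter's tid table:

	/* Condensed probe sketch: register, copy the lld_info into the private
	 * area, fill the fields shown in the listing, then ddp/ofld init and
	 * HBA add.  Error unwinding, rx_credit_thres and the ISO probe are
	 * omitted.
	 */
	static void *example_uld_add(const struct cxgb4_lld_info *lldi)
	{
		struct cxgbi_device *cdev;
		int rc;

		cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
		if (!cdev)
			return ERR_PTR(-ENOMEM);

		memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
		cdev->flags = CXGBI_FLAG_DEV_T4;
		cdev->pdev = lldi->pdev;
		cdev->ports = lldi->ports;
		cdev->nports = lldi->nports;
		cdev->mtus = lldi->mtus;
		cdev->nmtus = NMTUS;
		cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
		cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
		cdev->itp = &cxgb4i_iscsi_transport;
		cdev->owner = THIS_MODULE;
		cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);

		rc = cxgb4i_ddp_init(cdev);
		if (!rc)
			rc = cxgb4i_ofld_init(cdev);
		if (rc)
			goto rel_resource;

		rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGB4I_MAX_CONN,
				    &cxgb4i_host_template, cxgb4i_stt);
		if (rc)
			goto rel_resource;

		return cdev;

	rel_resource:
		cxgbi_device_unregister(cdev);
		return ERR_PTR(-ENOMEM);
	}
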
2313 struct cxgbi_device *cdev = handle;
2338 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
2339 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
2344 cxgb4i_cplhandlers[opc](cdev, skb);
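The 2313–2344 entries are the ULD receive hook: the opaque handle cxgb4 passes back is the cxgbi_device, and the CPL opcode from the message header selects the do_*() handler through the cxgb4i_cplhandlers[] table. A sketch of just that dispatch step (skb construction from the response/gather list is omitted; the bounds check is an addition for the sketch):

	/* Dispatch step only: pick the do_*() handler by CPL opcode. */
	static void example_dispatch_cpl(struct cxgbi_device *cdev,
					 struct sk_buff *skb)
	{
		const struct cpl_act_establish *rpl =
					(struct cpl_act_establish *)skb->data;
		u8 opc = rpl->ot.opcode;

		if (opc < ARRAY_SIZE(cxgb4i_cplhandlers) && cxgb4i_cplhandlers[opc])
			cxgb4i_cplhandlers[opc](cdev, skb);
		else
			__kfree_skb(skb);	/* no handler registered: drop */
	}
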
2354 struct cxgbi_device *cdev = handle;
2358 pr_info("cdev 0x%p, UP.\n", cdev);
2361 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2365 pr_info("cdev 0x%p, DOWN.\n", cdev);
2368 pr_info("cdev 0x%p, DETACH.\n", cdev);
2369 cxgbi_device_unregister(cdev);
2372 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
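The last cdev uses before the portmap lookup are in the ULD state-change callback (2354–2372): transitions are only logged, except DETACH, which unregisters the cxgbi_device. A sketch, assuming the cxgb4 ULD state enum:

	/* Sketch of the state-change handling: log transitions, tear down on
	 * DETACH.  RECOVERY/DOWN handling beyond the log is omitted, as in
	 * the listing.
	 */
	static int example_uld_state_change(void *handle, enum cxgb4_state state)
	{
		struct cxgbi_device *cdev = handle;

		switch (state) {
		case CXGB4_STATE_UP:
			pr_info("cdev 0x%p, UP.\n", cdev);
			break;
		case CXGB4_STATE_START_RECOVERY:
			pr_info("cdev 0x%p, RECOVERY.\n", cdev);
			break;
		case CXGB4_STATE_DOWN:
			pr_info("cdev 0x%p, DOWN.\n", cdev);
			break;
		case CXGB4_STATE_DETACH:
			pr_info("cdev 0x%p, DETACH.\n", cdev);
			cxgbi_device_unregister(cdev);
			break;
		default:
			pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
			break;
		}
		return 0;
	}
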
2385 struct cxgbi_device *cdev = NULL;
2418 cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
2421 if (!cdev)
2424 pmap = &cdev->pmap;