Lines Matching refs:dev

109 struct cnic_dev *dev;
118 dev = udev->dev;
120 if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
127 cnic_shutdown_rings(dev);
128 cnic_init_rings(dev);
142 static inline void cnic_hold(struct cnic_dev *dev)
144 atomic_inc(&dev->ref_count);
147 static inline void cnic_put(struct cnic_dev *dev)
149 atomic_dec(&dev->ref_count);
188 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
190 struct cnic_local *cp = dev->cnic_priv;
200 ethdev->drv_ctl(dev->netdev, &info);
203 static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
205 struct cnic_local *cp = dev->cnic_priv;
214 ethdev->drv_ctl(dev->netdev, &info);
217 static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
219 struct cnic_local *cp = dev->cnic_priv;
232 ethdev->drv_ctl(dev->netdev, &info);
235 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
237 struct cnic_local *cp = dev->cnic_priv;
246 ethdev->drv_ctl(dev->netdev, &info);
249 static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
251 struct cnic_local *cp = dev->cnic_priv;
259 ethdev->drv_ctl(dev->netdev, &info);
263 static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
265 struct cnic_local *cp = dev->cnic_priv;
274 if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
275 memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
282 ethdev->drv_ctl(dev->netdev, &info);
290 static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
292 struct cnic_local *cp = dev->cnic_priv;
299 ethdev->drv_ctl(dev->netdev, &info);
372 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
388 cp = dev->cnic_priv;
475 struct cnic_dev *dev;
490 list_for_each_entry(dev, &cnic_dev_list, list) {
491 struct cnic_local *cp = dev->cnic_priv;
503 list_for_each_entry(dev, &cnic_dev_list, list) {
504 struct cnic_local *cp = dev->cnic_priv;
507 ulp_ops->cnic_init(dev);
516 struct cnic_dev *dev;
532 list_for_each_entry(dev, &cnic_dev_list, list) {
533 struct cnic_local *cp = dev->cnic_priv;
566 static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
569 struct cnic_local *cp = dev->cnic_priv;
594 cnic_hold(dev);
596 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
602 cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
609 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
611 struct cnic_local *cp = dev->cnic_priv;
625 cnic_put(dev);
635 dev->fcoe_cap = NULL;
645 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
648 cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
650 cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
732 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
741 dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
747 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
756 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
770 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
784 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
788 struct cnic_local *cp = dev->cnic_priv;
799 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
811 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
816 cp->setup_pgtbl(dev, dma);
821 cnic_free_dma(dev, dma);
825 static void cnic_free_context(struct cnic_dev *dev)
827 struct cnic_local *cp = dev->cnic_priv;
832 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
843 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
849 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
877 static void cnic_free_resc(struct cnic_dev *dev)
879 struct cnic_local *cp = dev->cnic_priv;
883 udev->dev = NULL;
889 cnic_free_context(dev);
894 cnic_free_dma(dev, &cp->gbl_buf_info);
895 cnic_free_dma(dev, &cp->kwq_info);
896 cnic_free_dma(dev, &cp->kwq_16_data_info);
897 cnic_free_dma(dev, &cp->kcq2.dma);
898 cnic_free_dma(dev, &cp->kcq1.dma);
908 static int cnic_alloc_context(struct cnic_dev *dev)
910 struct cnic_local *cp = dev->cnic_priv;
932 reg = cnic_reg_rd_ind(dev, off);
947 dma_alloc_coherent(&dev->pcidev->dev,
984 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
993 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
1023 struct cnic_local *cp = udev->dev->cnic_priv;
1029 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1036 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1047 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1049 struct cnic_local *cp = dev->cnic_priv;
1053 if (udev->pdev == dev->pcidev) {
1054 udev->dev = dev;
1056 udev->dev = NULL;
1070 udev->dev = dev;
1071 udev->pdev = dev->pcidev;
1089 static int cnic_init_uio(struct cnic_dev *dev)
1091 struct cnic_local *cp = dev->cnic_priv;
1101 uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1102 uinfo->mem[0].internal_addr = dev->regview;
1105 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1116 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1117 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1146 ret = uio_register_device(&udev->pdev->dev, uinfo);
1149 cnic_init_rings(dev);
1155 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1157 struct cnic_local *cp = dev->cnic_priv;
1160 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1165 ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1169 ret = cnic_alloc_context(dev);
1173 ret = cnic_alloc_uio_rings(dev, 2);
1177 ret = cnic_init_uio(dev);
1184 cnic_free_resc(dev);
1188 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1190 struct cnic_local *cp = dev->cnic_priv;
1191 struct bnx2x *bp = netdev_priv(dev->netdev);
1218 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1226 cnic_free_context(dev);
1236 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1238 struct cnic_local *cp = dev->cnic_priv;
1239 struct bnx2x *bp = netdev_priv(dev->netdev);
1250 cp->max_cid_space += dev->max_fcoe_conn;
1277 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1293 ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1298 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1304 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1308 ret = cnic_alloc_bnx2x_context(dev);
1319 ret = cnic_alloc_uio_rings(dev, 4);
1323 ret = cnic_init_uio(dev);
1330 cnic_free_resc(dev);
1340 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1343 struct cnic_local *cp = dev->cnic_priv;
1347 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1369 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1387 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1390 struct cnic_local *cp = dev->cnic_priv;
1391 struct bnx2x *bp = netdev_priv(dev->netdev);
1413 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1422 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1425 struct cnic_local *cp = dev->cnic_priv;
1437 static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1440 struct bnx2x *bp = netdev_priv(dev->netdev);
1451 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1454 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1458 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1460 struct cnic_local *cp = dev->cnic_priv;
1461 struct bnx2x *bp = netdev_priv(dev->netdev);
1477 if (!dev->max_iscsi_conn)
1481 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1483 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1485 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1487 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1492 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1495 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1497 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1499 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1502 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1504 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1506 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1510 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1512 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1514 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1517 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1519 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1521 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1525 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1527 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1529 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1532 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1534 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1537 cnic_bnx2x_set_tcp_options(dev,
1544 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1547 struct bnx2x *bp = netdev_priv(dev->netdev);
1553 if (!dev->max_iscsi_conn) {
1559 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1561 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1565 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1567 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1569 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1573 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1581 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1586 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1588 struct cnic_local *cp = dev->cnic_priv;
1594 cnic_free_dma(dev, &iscsi->hq_info);
1595 cnic_free_dma(dev, &iscsi->r2tq_info);
1596 cnic_free_dma(dev, &iscsi->task_array_info);
1605 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1609 struct cnic_local *cp = dev->cnic_priv;
1632 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1637 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1642 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1649 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1653 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1656 struct cnic_local *cp = dev->cnic_priv;
1683 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1686 struct cnic_local *cp = dev->cnic_priv;
1687 struct bnx2x *bp = netdev_priv(dev->netdev);
1708 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1870 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1875 struct cnic_local *cp = dev->cnic_priv;
1876 struct bnx2x *bp = netdev_priv(dev->netdev);
1912 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1916 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1921 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1923 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1933 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1938 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1940 struct cnic_local *cp = dev->cnic_priv;
1957 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1962 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1964 struct cnic_local *cp = dev->cnic_priv;
1965 struct bnx2x *bp = netdev_priv(dev->netdev);
1976 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1988 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1990 struct cnic_local *cp = dev->cnic_priv;
2013 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2016 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2031 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2036 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2060 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2080 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2082 struct bnx2x *bp = netdev_priv(dev->netdev);
2084 u8 *mac = dev->mac_addr;
2086 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2088 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2090 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2092 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2094 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2096 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2099 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2101 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2104 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2106 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2109 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2111 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2116 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2119 struct cnic_local *cp = dev->cnic_priv;
2120 struct bnx2x *bp = netdev_priv(dev->netdev);
2148 netdev_err(dev->netdev, "conn_buf size too big\n");
2186 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2188 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2191 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2199 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2206 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2211 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2218 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2222 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2233 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2237 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2248 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2252 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2257 struct cnic_local *cp = dev->cnic_priv;
2258 struct bnx2x *bp = netdev_priv(dev->netdev);
2272 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2277 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2281 struct cnic_local *cp = dev->cnic_priv;
2282 struct bnx2x *bp = netdev_priv(dev->netdev);
2307 netdev_err(dev->netdev, "fcoe_init size too big\n");
2328 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2334 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2339 struct cnic_local *cp = dev->cnic_priv;
2340 struct bnx2x *bp = netdev_priv(dev->netdev);
2365 if (l5_cid >= dev->max_fcoe_conn)
2374 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2381 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2394 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2408 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2417 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2425 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2429 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2436 struct cnic_local *cp = dev->cnic_priv;
2443 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2452 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2457 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2464 struct cnic_local *cp = dev->cnic_priv;
2469 if (l5_cid >= dev->max_fcoe_conn)
2475 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2484 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2489 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2495 struct cnic_local *cp = dev->cnic_priv;
2503 if (l5_cid >= dev->max_fcoe_conn)
2516 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2532 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2536 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2538 struct cnic_local *cp = dev->cnic_priv;
2555 netdev_warn(dev->netdev, "CID %x not deleted\n",
2560 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2563 struct cnic_local *cp = dev->cnic_priv;
2564 struct bnx2x *bp = netdev_priv(dev->netdev);
2568 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2573 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2578 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2580 struct cnic_local *cp = dev->cnic_priv;
2648 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2651 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2658 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2668 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2671 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2674 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2678 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2681 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2684 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2688 ret = cnic_bnx2x_close(dev, kwqe);
2691 ret = cnic_bnx2x_reset(dev, kwqe);
2694 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2697 ret = cnic_bnx2x_update_pg(dev, kwqe);
2704 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2709 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2717 cnic_bnx2x_kwqe_err(dev, kwqe);
2724 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2727 struct bnx2x *bp = netdev_priv(dev->netdev);
2732 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2745 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2749 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2753 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2756 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2759 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2762 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2765 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2769 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2774 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2782 cnic_bnx2x_kwqe_err(dev, kwqe);
2789 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2795 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2806 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2810 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2824 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2826 struct cnic_local *cp = dev->cnic_priv;
2862 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2880 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2883 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2885 struct cnic_local *cp = dev->cnic_priv;
2919 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2968 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2970 struct cnic_local *cp = dev->cnic_priv;
2978 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2980 service_kcqes(dev, kcqe_cnt);
2990 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2999 struct cnic_dev *dev = data;
3001 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3007 return cnic_service_bnx2_queues(dev);
3013 struct cnic_dev *dev = cp->dev;
3015 cp->last_status_idx = cnic_service_bnx2_queues(dev);
3017 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3021 static void cnic_doirq(struct cnic_dev *dev)
3023 struct cnic_local *cp = dev->cnic_priv;
3025 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3037 struct cnic_dev *dev = dev_instance;
3038 struct cnic_local *cp = dev->cnic_priv;
3041 cp->ack_int(dev);
3043 cnic_doirq(dev);
3048 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3051 struct bnx2x *bp = netdev_priv(dev->netdev);
3063 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3066 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3079 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3082 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3084 struct cnic_local *cp = dev->cnic_priv;
3086 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3090 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3092 struct cnic_local *cp = dev->cnic_priv;
3094 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3098 static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3100 struct cnic_local *cp = dev->cnic_priv;
3102 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3106 static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3108 struct cnic_local *cp = dev->cnic_priv;
3110 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3114 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3121 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3123 service_kcqes(dev, kcqe_cnt);
3138 struct cnic_dev *dev = cp->dev;
3139 struct bnx2x *bp = netdev_priv(dev->netdev);
3142 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3146 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3148 CNIC_WR16(dev, cp->kcq1.io_addr,
3152 cp->arm_int(dev, status_idx);
3156 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3161 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3164 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3173 struct cnic_dev *dev = data;
3174 struct cnic_local *cp = dev->cnic_priv;
3177 cnic_doirq(dev);
3207 static void cnic_ulp_stop(struct cnic_dev *dev)
3209 struct cnic_local *cp = dev->cnic_priv;
3216 static void cnic_ulp_start(struct cnic_dev *dev)
3218 struct cnic_local *cp = dev->cnic_priv;
3241 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3243 struct cnic_local *cp = dev->cnic_priv;
3260 struct cnic_dev *dev = data;
3265 cnic_hold(dev);
3267 cnic_ulp_stop(dev);
3268 cnic_stop_hw(dev);
3270 cnic_put(dev);
3273 cnic_hold(dev);
3275 if (!cnic_start_hw(dev))
3276 cnic_ulp_start(dev);
3278 cnic_put(dev);
3281 struct cnic_local *cp = dev->cnic_priv;
3290 struct cnic_local *cp = dev->cnic_priv;
3292 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3300 netdev_err(dev->netdev,
3314 cnic_hold(dev);
3315 cnic_copy_ulp_stats(dev, ulp_type);
3316 cnic_put(dev);
3325 static void cnic_ulp_init(struct cnic_dev *dev)
3328 struct cnic_local *cp = dev->cnic_priv;
3343 ulp_ops->cnic_init(dev);
3349 static void cnic_ulp_exit(struct cnic_dev *dev)
3352 struct cnic_local *cp = dev->cnic_priv;
3367 ulp_ops->cnic_exit(dev);
3375 struct cnic_dev *dev = csk->dev;
3395 l4kwqe->sa0 = dev->mac_addr[0];
3396 l4kwqe->sa1 = dev->mac_addr[1];
3397 l4kwqe->sa2 = dev->mac_addr[2];
3398 l4kwqe->sa3 = dev->mac_addr[3];
3399 l4kwqe->sa4 = dev->mac_addr[4];
3400 l4kwqe->sa5 = dev->mac_addr[5];
3412 return dev->submit_kwqes(dev, wqes, 1);
3417 struct cnic_dev *dev = csk->dev;
3440 return dev->submit_kwqes(dev, wqes, 1);
3445 struct cnic_dev *dev = csk->dev;
3458 return dev->submit_kwqes(dev, wqes, 1);
3463 struct cnic_dev *dev = csk->dev;
3542 return dev->submit_kwqes(dev, wqes, num_wqes);
3547 struct cnic_dev *dev = csk->dev;
3559 return dev->submit_kwqes(dev, wqes, 1);
3564 struct cnic_dev *dev = csk->dev;
3576 return dev->submit_kwqes(dev, wqes, 1);
3579 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3582 struct cnic_local *cp = dev->cnic_priv;
3602 csk1->dev = dev;
3626 struct cnic_dev *dev = csk->dev;
3627 struct cnic_local *cp = dev->cnic_priv;
3660 static inline u16 cnic_get_vlan(struct net_device *dev,
3663 if (is_vlan_dev(dev)) {
3664 *vlan_dev = vlan_dev_real_dev(dev);
3665 return vlan_dev_vlan_id(dev);
3667 *vlan_dev = dev;
3714 struct cnic_dev *dev = NULL;
3732 if (!dst->dev)
3735 cnic_get_vlan(dst->dev, &netdev);
3737 dev = cnic_from_netdev(netdev);
3741 if (dev)
3742 cnic_put(dev);
3743 return dev;
3748 struct cnic_dev *dev = csk->dev;
3749 struct cnic_local *cp = dev->cnic_priv;
3756 struct cnic_dev *dev = csk->dev;
3757 struct cnic_local *cp = dev->cnic_priv;
3793 csk->mtu = dev->netdev->mtu;
3794 if (dst && dst->dev) {
3795 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3796 if (realdev == dev->netdev) {
3834 struct cnic_local *cp = csk->dev->cnic_priv;
3863 struct cnic_local *cp = csk->dev->cnic_priv;
3940 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3942 struct cnic_local *cp = dev->cnic_priv;
3971 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3973 struct cnic_local *cp = dev->cnic_priv;
3983 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3985 struct cnic_local *cp = dev->cnic_priv;
3992 cnic_process_fcoe_term_conn(dev, kcqe);
3997 cnic_cm_process_offld_pg(dev, l4kcqe);
4041 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4059 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4072 struct cnic_dev *dev = data;
4076 cnic_cm_process_kcqe(dev, kcqe[i]);
4083 static void cnic_cm_free_mem(struct cnic_dev *dev)
4085 struct cnic_local *cp = dev->cnic_priv;
4092 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4094 struct cnic_local *cp = dev->cnic_priv;
4109 cnic_cm_free_mem(dev);
4143 struct cnic_dev *dev = csk->dev;
4144 struct cnic_local *cp = dev->cnic_priv;
4157 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4161 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4166 cnic_ctx_wr(dev, 45, 0, seed);
4172 struct cnic_dev *dev = csk->dev;
4173 struct cnic_local *cp = dev->cnic_priv;
4202 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4211 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4213 struct cnic_local *cp = dev->cnic_priv;
4218 if (!netif_running(dev->netdev))
4221 cnic_bnx2x_delete_wait(dev, 0);
4227 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4231 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4233 struct bnx2x *bp = netdev_priv(dev->netdev);
4237 cnic_init_bnx2x_mac(dev);
4238 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4240 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4243 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4245 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4249 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4251 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4253 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4255 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4258 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4266 struct cnic_dev *dev;
4271 dev = cp->dev;
4280 cp->ethdev->drv_ctl(dev->netdev, &info);
4299 err = cnic_bnx2x_destroy_ramrod(dev, i);
4301 cnic_free_bnx2x_conn_resc(dev, i);
4316 static int cnic_cm_open(struct cnic_dev *dev)
4318 struct cnic_local *cp = dev->cnic_priv;
4321 err = cnic_cm_alloc_mem(dev);
4325 err = cp->start_cm(dev);
4332 dev->cm_create = cnic_cm_create;
4333 dev->cm_destroy = cnic_cm_destroy;
4334 dev->cm_connect = cnic_cm_connect;
4335 dev->cm_abort = cnic_cm_abort;
4336 dev->cm_close = cnic_cm_close;
4337 dev->cm_select_dev = cnic_cm_select_dev;
4339 cp->ulp_handle[CNIC_ULP_L4] = dev;
4344 cnic_cm_free_mem(dev);
4348 static int cnic_cm_shutdown(struct cnic_dev *dev)
4350 struct cnic_local *cp = dev->cnic_priv;
4362 cnic_cm_free_mem(dev);
4367 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4375 cnic_ctx_wr(dev, cid_addr, i, 0);
4378 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4380 struct cnic_local *cp = dev->cnic_priv;
4394 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4396 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4398 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4402 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4415 static void cnic_free_irq(struct cnic_dev *dev)
4417 struct cnic_local *cp = dev->cnic_priv;
4421 cp->disable_int_sync(dev);
4423 free_irq(ethdev->irq_arr[0].vector, dev);
4427 static int cnic_request_irq(struct cnic_dev *dev)
4429 struct cnic_local *cp = dev->cnic_priv;
4433 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4440 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4442 struct cnic_local *cp = dev->cnic_priv;
4451 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4453 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4454 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4455 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4459 err = cnic_request_irq(dev);
4465 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4472 cnic_free_irq(dev);
4478 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4482 CNIC_WR(dev, BNX2_HC_COMMAND,
4495 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4499 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4501 struct cnic_local *cp = dev->cnic_priv;
4507 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4511 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4513 struct cnic_local *cp = dev->cnic_priv;
4519 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4521 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4525 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4527 struct cnic_local *cp = dev->cnic_priv;
4544 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4555 cnic_ctx_wr(dev, cid_addr2, i, 0);
4562 cnic_init_context(dev, tx_cid);
4563 cnic_init_context(dev, tx_cid + 1);
4571 cnic_ctx_wr(dev, cid_addr, offset0, val);
4574 cnic_ctx_wr(dev, cid_addr, offset1, val);
4584 cnic_ctx_wr(dev, cid_addr, offset2, val);
4588 cnic_ctx_wr(dev, cid_addr, offset3, val);
4592 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4594 struct cnic_local *cp = dev->cnic_priv;
4604 cnic_init_context(dev, 2);
4607 coal_val = CNIC_RD(dev, coal_reg);
4617 CNIC_WR(dev, coal_reg, coal_val);
4627 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4633 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4647 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4651 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4654 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4655 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4658 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4667 dev->submit_kwqes(dev, wqes, 1);
4670 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4672 struct cnic_local *cp = dev->cnic_priv;
4677 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4679 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4681 dev->mac_addr[0] = (u8) (val >> 8);
4682 dev->mac_addr[1] = (u8) val;
4684 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4686 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4688 dev->mac_addr[2] = (u8) (val >> 24);
4689 dev->mac_addr[3] = (u8) (val >> 16);
4690 dev->mac_addr[4] = (u8) (val >> 8);
4691 dev->mac_addr[5] = (u8) val;
4693 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4699 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4700 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4701 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4704 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4706 struct cnic_local *cp = dev->cnic_priv;
4712 cnic_set_bnx2_mac(dev);
4714 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4721 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4723 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4724 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4725 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4727 err = cnic_setup_5709_context(dev, 1);
4731 cnic_init_context(dev, KWQ_CID);
4732 cnic_init_context(dev, KCQ_CID);
4750 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4753 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4756 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4759 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4762 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4776 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4779 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4782 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4785 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4788 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4801 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4802 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4807 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4812 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4813 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4816 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4821 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4822 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4824 cnic_init_bnx2_tx_ring(dev);
4825 cnic_init_bnx2_rx_ring(dev);
4827 err = cnic_init_bnx2_irq(dev);
4829 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4830 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4831 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4840 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4842 struct cnic_local *cp = dev->cnic_priv;
4857 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4861 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4863 struct cnic_local *cp = dev->cnic_priv;
4869 err = cnic_request_irq(dev);
4874 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4878 struct bnx2x *bp = netdev_priv(dev->netdev);
4885 u16 flags = CNIC_RD16(dev, addr);
4890 CNIC_WR16(dev, addr, flags);
4893 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4895 struct cnic_local *cp = dev->cnic_priv;
4896 struct bnx2x *bp = netdev_priv(dev->netdev);
4899 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4904 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4907 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4911 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4914 struct cnic_local *cp = dev->cnic_priv;
4915 struct bnx2x *bp = netdev_priv(dev->netdev);
4977 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4980 struct cnic_local *cp = dev->cnic_priv;
4981 struct bnx2x *bp = netdev_priv(dev->netdev);
5045 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5047 struct cnic_local *cp = dev->cnic_priv;
5048 struct bnx2x *bp = netdev_priv(dev->netdev);
5084 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5086 struct cnic_local *cp = dev->cnic_priv;
5087 struct bnx2x *bp = netdev_priv(dev->netdev);
5092 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5104 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5113 cnic_init_bnx2x_kcq(dev);
5116 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5117 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5119 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5122 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5125 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5128 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5131 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5133 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5135 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5139 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5142 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5146 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5149 cnic_setup_bnx2x_context(dev);
5151 ret = cnic_init_bnx2x_irq(dev);
5159 static void cnic_init_rings(struct cnic_dev *dev)
5161 struct cnic_local *cp = dev->cnic_priv;
5162 struct bnx2x *bp = netdev_priv(dev->netdev);
5168 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5169 cnic_init_bnx2_tx_ring(dev);
5170 cnic_init_bnx2_rx_ring(dev);
5172 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5193 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5202 cnic_init_bnx2x_tx_ring(dev, data);
5203 cnic_init_bnx2x_rx_ring(dev, data);
5212 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5221 netdev_err(dev->netdev,
5223 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5224 cnic_ring_ctl(dev, cid, cli, 1);
5231 static void cnic_shutdown_rings(struct cnic_dev *dev)
5233 struct cnic_local *cp = dev->cnic_priv;
5240 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5241 cnic_shutdown_bnx2_rx_ring(dev);
5242 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5248 cnic_ring_ctl(dev, cid, cli, 0);
5254 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5262 netdev_err(dev->netdev,
5264 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5267 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5276 static int cnic_register_netdev(struct cnic_dev *dev)
5278 struct cnic_local *cp = dev->cnic_priv;
5288 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5290 netdev_err(dev->netdev, "register_cnic failed\n");
5295 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5297 dev->max_iscsi_conn = 0;
5302 static void cnic_unregister_netdev(struct cnic_dev *dev)
5304 struct cnic_local *cp = dev->cnic_priv;
5310 ethdev->drv_unregister_cnic(dev->netdev);
5313 static int cnic_start_hw(struct cnic_dev *dev)
5315 struct cnic_local *cp = dev->cnic_priv;
5319 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5322 dev->regview = ethdev->io_base;
5323 pci_dev_get(dev->pcidev);
5324 cp->func = PCI_FUNC(dev->pcidev->devfn);
5328 err = cp->alloc_resc(dev);
5330 netdev_err(dev->netdev, "allocate resource failure\n");
5334 err = cp->start_hw(dev);
5338 err = cnic_cm_open(dev);
5342 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5344 cp->enable_int(dev);
5350 cp->stop_hw(dev);
5352 cp->free_resc(dev);
5353 pci_dev_put(dev->pcidev);
5357 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5359 cnic_disable_bnx2_int_sync(dev);
5361 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5362 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5364 cnic_init_context(dev, KWQ_CID);
5365 cnic_init_context(dev, KCQ_CID);
5367 cnic_setup_5709_context(dev, 0);
5368 cnic_free_irq(dev);
5370 cnic_free_resc(dev);
5374 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5376 struct cnic_local *cp = dev->cnic_priv;
5377 struct bnx2x *bp = netdev_priv(dev->netdev);
5382 cnic_free_irq(dev);
5395 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5396 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5400 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5402 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5403 cnic_free_resc(dev);
5406 static void cnic_stop_hw(struct cnic_dev *dev)
5408 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5409 struct cnic_local *cp = dev->cnic_priv;
5419 cnic_shutdown_rings(dev);
5420 cp->stop_cm(dev);
5422 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5425 cnic_cm_shutdown(dev);
5426 cp->stop_hw(dev);
5427 pci_dev_put(dev->pcidev);
5431 static void cnic_free_dev(struct cnic_dev *dev)
5435 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5439 if (atomic_read(&dev->ref_count) != 0)
5440 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5442 netdev_info(dev->netdev, "Removed CNIC device\n");
5443 dev_put(dev->netdev);
5444 kfree(dev);
5447 static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5450 struct cnic_local *cp = dev->cnic_priv;
5451 struct bnx2x *bp = netdev_priv(dev->netdev);
5454 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5460 ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5464 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5477 cdev->netdev = dev;
5486 cp->dev = cdev;
5492 netdev_info(dev, "Added CNIC device\n");
5497 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5502 struct bnx2 *bp = netdev_priv(dev);
5506 ethdev = (bp->cnic_probe)(dev);
5515 dev_hold(dev);
5525 cdev = cnic_alloc_dev(dev, pdev);
5553 dev_put(dev);
5557 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5562 struct bnx2x *bp = netdev_priv(dev);
5566 ethdev = bp->cnic_probe(dev);
5575 dev_hold(dev);
5576 cdev = cnic_alloc_dev(dev, pdev);
5578 dev_put(dev);
5625 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5630 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5632 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5635 cdev = init_bnx2_cnic(dev);
5637 cdev = init_bnx2x_cnic(dev);
5680 struct cnic_dev *dev;
5683 dev = cnic_from_netdev(netdev);
5685 if (!dev && event == NETDEV_REGISTER) {
5687 dev = is_cnic_dev(netdev);
5688 if (dev) {
5690 cnic_hold(dev);
5693 if (dev) {
5694 struct cnic_local *cp = dev->cnic_priv;
5697 cnic_ulp_init(dev);
5699 cnic_ulp_exit(dev);
5702 if (cnic_register_netdev(dev) != 0) {
5703 cnic_put(dev);
5706 if (!cnic_start_hw(dev))
5707 cnic_ulp_start(dev);
5713 cnic_ulp_stop(dev);
5714 cnic_stop_hw(dev);
5715 cnic_unregister_netdev(dev);
5718 list_del_init(&dev->list);
5721 cnic_put(dev);
5722 cnic_free_dev(dev);
5725 cnic_put(dev);
5732 dev = cnic_from_netdev(realdev);
5733 if (dev) {
5735 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5736 cnic_put(dev);