Lines Matching refs: dev (identifier cross-reference listing for the Broadcom cnic driver, cnic.c; the number at the start of each entry is the source line number in that file)

109 struct cnic_dev *dev;
118 dev = udev->dev;
120 if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
127 cnic_shutdown_rings(dev);
128 cnic_init_rings(dev);
142 static inline void cnic_hold(struct cnic_dev *dev)
144 atomic_inc(&dev->ref_count);
147 static inline void cnic_put(struct cnic_dev *dev)
149 atomic_dec(&dev->ref_count);
188 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
190 struct cnic_local *cp = dev->cnic_priv;
200 ethdev->drv_ctl(dev->netdev, &info);
203 static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
205 struct cnic_local *cp = dev->cnic_priv;
214 ethdev->drv_ctl(dev->netdev, &info);
217 static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
219 struct cnic_local *cp = dev->cnic_priv;
232 ethdev->drv_ctl(dev->netdev, &info);
235 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
237 struct cnic_local *cp = dev->cnic_priv;
246 ethdev->drv_ctl(dev->netdev, &info);
249 static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
251 struct cnic_local *cp = dev->cnic_priv;
259 ethdev->drv_ctl(dev->netdev, &info);
263 static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
265 struct cnic_local *cp = dev->cnic_priv;
274 if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
275 memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
282 ethdev->drv_ctl(dev->netdev, &info);
290 static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
292 struct cnic_local *cp = dev->cnic_priv;
299 ethdev->drv_ctl(dev->netdev, &info);
372 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
388 cp = dev->cnic_priv;
475 struct cnic_dev *dev;
490 list_for_each_entry(dev, &cnic_dev_list, list) {
491 struct cnic_local *cp = dev->cnic_priv;
503 list_for_each_entry(dev, &cnic_dev_list, list) {
504 struct cnic_local *cp = dev->cnic_priv;
507 ulp_ops->cnic_init(dev);
516 struct cnic_dev *dev;
532 list_for_each_entry(dev, &cnic_dev_list, list) {
533 struct cnic_local *cp = dev->cnic_priv;
566 static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
569 struct cnic_local *cp = dev->cnic_priv;
594 cnic_hold(dev);
596 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
602 cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
609 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
611 struct cnic_local *cp = dev->cnic_priv;
625 cnic_put(dev);
635 dev->fcoe_cap = NULL;
645 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
648 cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
650 cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
732 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
741 dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
747 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
756 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
770 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
784 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
788 struct cnic_local *cp = dev->cnic_priv;
799 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
811 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
816 cp->setup_pgtbl(dev, dma);
821 cnic_free_dma(dev, dma);
825 static void cnic_free_context(struct cnic_dev *dev)
827 struct cnic_local *cp = dev->cnic_priv;
832 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
843 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
849 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
877 static void cnic_free_resc(struct cnic_dev *dev)
879 struct cnic_local *cp = dev->cnic_priv;
883 udev->dev = NULL;
889 cnic_free_context(dev);
894 cnic_free_dma(dev, &cp->gbl_buf_info);
895 cnic_free_dma(dev, &cp->kwq_info);
896 cnic_free_dma(dev, &cp->kwq_16_data_info);
897 cnic_free_dma(dev, &cp->kcq2.dma);
898 cnic_free_dma(dev, &cp->kcq1.dma);
908 static int cnic_alloc_context(struct cnic_dev *dev)
910 struct cnic_local *cp = dev->cnic_priv;
932 reg = cnic_reg_rd_ind(dev, off);
947 dma_alloc_coherent(&dev->pcidev->dev,
984 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
993 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
1023 struct cnic_local *cp = udev->dev->cnic_priv;
1029 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1037 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1049 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1051 struct cnic_local *cp = dev->cnic_priv;
1055 if (udev->pdev == dev->pcidev) {
1056 udev->dev = dev;
1058 udev->dev = NULL;
1072 udev->dev = dev;
1073 udev->pdev = dev->pcidev;
1091 static int cnic_init_uio(struct cnic_dev *dev)
1093 struct cnic_local *cp = dev->cnic_priv;
1103 uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1104 uinfo->mem[0].internal_addr = dev->regview;
1107 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1118 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1119 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1148 ret = uio_register_device(&udev->pdev->dev, uinfo);
1151 cnic_init_rings(dev);
1157 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1159 struct cnic_local *cp = dev->cnic_priv;
1162 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1167 ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1171 ret = cnic_alloc_context(dev);
1175 ret = cnic_alloc_uio_rings(dev, 2);
1179 ret = cnic_init_uio(dev);
1186 cnic_free_resc(dev);
1190 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1192 struct cnic_local *cp = dev->cnic_priv;
1193 struct bnx2x *bp = netdev_priv(dev->netdev);
1220 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1228 cnic_free_context(dev);
1238 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1240 struct cnic_local *cp = dev->cnic_priv;
1241 struct bnx2x *bp = netdev_priv(dev->netdev);
1252 cp->max_cid_space += dev->max_fcoe_conn;
1279 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1295 ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1300 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1306 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1310 ret = cnic_alloc_bnx2x_context(dev);
1321 ret = cnic_alloc_uio_rings(dev, 4);
1325 ret = cnic_init_uio(dev);
1332 cnic_free_resc(dev);
1342 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1345 struct cnic_local *cp = dev->cnic_priv;
1349 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1371 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1389 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1392 struct cnic_local *cp = dev->cnic_priv;
1393 struct bnx2x *bp = netdev_priv(dev->netdev);
1415 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1424 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1427 struct cnic_local *cp = dev->cnic_priv;
1439 static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1442 struct bnx2x *bp = netdev_priv(dev->netdev);
1453 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1456 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1460 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1462 struct cnic_local *cp = dev->cnic_priv;
1463 struct bnx2x *bp = netdev_priv(dev->netdev);
1479 if (!dev->max_iscsi_conn)
1483 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1485 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1487 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1489 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1494 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1497 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1499 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1501 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1504 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1506 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1508 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1512 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1514 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1516 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1519 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1521 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1523 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1527 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1529 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1531 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1534 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1536 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1539 cnic_bnx2x_set_tcp_options(dev,
1546 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1549 struct bnx2x *bp = netdev_priv(dev->netdev);
1555 if (!dev->max_iscsi_conn) {
1561 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1563 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1567 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1569 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1571 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1575 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1583 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1588 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1590 struct cnic_local *cp = dev->cnic_priv;
1596 cnic_free_dma(dev, &iscsi->hq_info);
1597 cnic_free_dma(dev, &iscsi->r2tq_info);
1598 cnic_free_dma(dev, &iscsi->task_array_info);
1607 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1611 struct cnic_local *cp = dev->cnic_priv;
1634 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1639 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1644 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1651 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1655 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1658 struct cnic_local *cp = dev->cnic_priv;
1685 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1688 struct cnic_local *cp = dev->cnic_priv;
1689 struct bnx2x *bp = netdev_priv(dev->netdev);
1710 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1872 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1877 struct cnic_local *cp = dev->cnic_priv;
1878 struct bnx2x *bp = netdev_priv(dev->netdev);
1914 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1918 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1923 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1925 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1935 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1940 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1942 struct cnic_local *cp = dev->cnic_priv;
1959 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1964 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1966 struct cnic_local *cp = dev->cnic_priv;
1967 struct bnx2x *bp = netdev_priv(dev->netdev);
1978 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1990 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1992 struct cnic_local *cp = dev->cnic_priv;
2015 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2018 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2033 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2038 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2062 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2082 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2084 struct bnx2x *bp = netdev_priv(dev->netdev);
2086 u8 *mac = dev->mac_addr;
2088 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2090 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2092 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2094 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2096 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2098 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2101 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2103 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2106 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2108 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2111 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2113 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2118 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2121 struct cnic_local *cp = dev->cnic_priv;
2122 struct bnx2x *bp = netdev_priv(dev->netdev);
2150 netdev_err(dev->netdev, "conn_buf size too big\n");
2188 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2190 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2193 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2201 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2208 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2213 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2220 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2224 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2235 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2239 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2250 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2254 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2259 struct cnic_local *cp = dev->cnic_priv;
2260 struct bnx2x *bp = netdev_priv(dev->netdev);
2274 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2279 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2283 struct cnic_local *cp = dev->cnic_priv;
2284 struct bnx2x *bp = netdev_priv(dev->netdev);
2309 netdev_err(dev->netdev, "fcoe_init size too big\n");
2330 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2336 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2341 struct cnic_local *cp = dev->cnic_priv;
2342 struct bnx2x *bp = netdev_priv(dev->netdev);
2367 if (l5_cid >= dev->max_fcoe_conn)
2376 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2383 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2396 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2410 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2419 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2427 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2431 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2438 struct cnic_local *cp = dev->cnic_priv;
2445 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2454 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2459 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2466 struct cnic_local *cp = dev->cnic_priv;
2471 if (l5_cid >= dev->max_fcoe_conn)
2477 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2486 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2491 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2497 struct cnic_local *cp = dev->cnic_priv;
2505 if (l5_cid >= dev->max_fcoe_conn)
2518 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2534 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2538 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2540 struct cnic_local *cp = dev->cnic_priv;
2557 netdev_warn(dev->netdev, "CID %x not deleted\n",
2562 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2565 struct cnic_local *cp = dev->cnic_priv;
2566 struct bnx2x *bp = netdev_priv(dev->netdev);
2570 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2575 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2580 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2582 struct cnic_local *cp = dev->cnic_priv;
2650 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2653 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2660 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2670 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2673 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2676 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2680 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2683 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2686 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2690 ret = cnic_bnx2x_close(dev, kwqe);
2693 ret = cnic_bnx2x_reset(dev, kwqe);
2696 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2699 ret = cnic_bnx2x_update_pg(dev, kwqe);
2706 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2711 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2719 cnic_bnx2x_kwqe_err(dev, kwqe);
2726 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2729 struct bnx2x *bp = netdev_priv(dev->netdev);
2734 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2747 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2751 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2755 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2758 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2761 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2764 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2767 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2771 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2776 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2784 cnic_bnx2x_kwqe_err(dev, kwqe);
2791 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2797 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2808 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2812 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2826 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2828 struct cnic_local *cp = dev->cnic_priv;
2864 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2882 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2885 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2887 struct cnic_local *cp = dev->cnic_priv;
2921 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2970 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2972 struct cnic_local *cp = dev->cnic_priv;
2980 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2982 service_kcqes(dev, kcqe_cnt);
2992 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
3001 struct cnic_dev *dev = data;
3003 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3009 return cnic_service_bnx2_queues(dev);
3015 struct cnic_dev *dev = cp->dev;
3017 cp->last_status_idx = cnic_service_bnx2_queues(dev);
3019 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3023 static void cnic_doirq(struct cnic_dev *dev)
3025 struct cnic_local *cp = dev->cnic_priv;
3027 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3039 struct cnic_dev *dev = dev_instance;
3040 struct cnic_local *cp = dev->cnic_priv;
3043 cp->ack_int(dev);
3045 cnic_doirq(dev);
3050 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3053 struct bnx2x *bp = netdev_priv(dev->netdev);
3065 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3068 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3081 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3084 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3086 struct cnic_local *cp = dev->cnic_priv;
3088 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3092 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3094 struct cnic_local *cp = dev->cnic_priv;
3096 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3100 static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3102 struct cnic_local *cp = dev->cnic_priv;
3104 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3108 static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3110 struct cnic_local *cp = dev->cnic_priv;
3112 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3116 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3123 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3125 service_kcqes(dev, kcqe_cnt);
3140 struct cnic_dev *dev = cp->dev;
3141 struct bnx2x *bp = netdev_priv(dev->netdev);
3144 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3148 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3150 CNIC_WR16(dev, cp->kcq1.io_addr,
3154 cp->arm_int(dev, status_idx);
3158 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3163 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3166 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3175 struct cnic_dev *dev = data;
3176 struct cnic_local *cp = dev->cnic_priv;
3179 cnic_doirq(dev);
3209 static void cnic_ulp_stop(struct cnic_dev *dev)
3211 struct cnic_local *cp = dev->cnic_priv;
3218 static void cnic_ulp_start(struct cnic_dev *dev)
3220 struct cnic_local *cp = dev->cnic_priv;
3243 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3245 struct cnic_local *cp = dev->cnic_priv;
3262 struct cnic_dev *dev = data;
3267 cnic_hold(dev);
3269 cnic_ulp_stop(dev);
3270 cnic_stop_hw(dev);
3272 cnic_put(dev);
3275 cnic_hold(dev);
3277 if (!cnic_start_hw(dev))
3278 cnic_ulp_start(dev);
3280 cnic_put(dev);
3283 struct cnic_local *cp = dev->cnic_priv;
3292 struct cnic_local *cp = dev->cnic_priv;
3294 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3302 netdev_err(dev->netdev,
3316 cnic_hold(dev);
3317 cnic_copy_ulp_stats(dev, ulp_type);
3318 cnic_put(dev);
3327 static void cnic_ulp_init(struct cnic_dev *dev)
3330 struct cnic_local *cp = dev->cnic_priv;
3345 ulp_ops->cnic_init(dev);
3351 static void cnic_ulp_exit(struct cnic_dev *dev)
3354 struct cnic_local *cp = dev->cnic_priv;
3369 ulp_ops->cnic_exit(dev);
3377 struct cnic_dev *dev = csk->dev;
3397 l4kwqe->sa0 = dev->mac_addr[0];
3398 l4kwqe->sa1 = dev->mac_addr[1];
3399 l4kwqe->sa2 = dev->mac_addr[2];
3400 l4kwqe->sa3 = dev->mac_addr[3];
3401 l4kwqe->sa4 = dev->mac_addr[4];
3402 l4kwqe->sa5 = dev->mac_addr[5];
3414 return dev->submit_kwqes(dev, wqes, 1);
3419 struct cnic_dev *dev = csk->dev;
3442 return dev->submit_kwqes(dev, wqes, 1);
3447 struct cnic_dev *dev = csk->dev;
3460 return dev->submit_kwqes(dev, wqes, 1);
3465 struct cnic_dev *dev = csk->dev;
3544 return dev->submit_kwqes(dev, wqes, num_wqes);
3549 struct cnic_dev *dev = csk->dev;
3561 return dev->submit_kwqes(dev, wqes, 1);
3566 struct cnic_dev *dev = csk->dev;
3578 return dev->submit_kwqes(dev, wqes, 1);
3581 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3584 struct cnic_local *cp = dev->cnic_priv;
3604 csk1->dev = dev;
3628 struct cnic_dev *dev = csk->dev;
3629 struct cnic_local *cp = dev->cnic_priv;
3662 static inline u16 cnic_get_vlan(struct net_device *dev,
3665 if (is_vlan_dev(dev)) {
3666 *vlan_dev = vlan_dev_real_dev(dev);
3667 return vlan_dev_vlan_id(dev);
3669 *vlan_dev = dev;
3716 struct cnic_dev *dev = NULL;
3734 if (!dst->dev)
3737 cnic_get_vlan(dst->dev, &netdev);
3739 dev = cnic_from_netdev(netdev);
3743 if (dev)
3744 cnic_put(dev);
3745 return dev;
3750 struct cnic_dev *dev = csk->dev;
3751 struct cnic_local *cp = dev->cnic_priv;
3758 struct cnic_dev *dev = csk->dev;
3759 struct cnic_local *cp = dev->cnic_priv;
3795 csk->mtu = dev->netdev->mtu;
3796 if (dst && dst->dev) {
3797 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3798 if (realdev == dev->netdev) {
3836 struct cnic_local *cp = csk->dev->cnic_priv;
3865 struct cnic_local *cp = csk->dev->cnic_priv;
3942 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3944 struct cnic_local *cp = dev->cnic_priv;
3973 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3975 struct cnic_local *cp = dev->cnic_priv;
3985 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3987 struct cnic_local *cp = dev->cnic_priv;
3994 cnic_process_fcoe_term_conn(dev, kcqe);
3999 cnic_cm_process_offld_pg(dev, l4kcqe);
4043 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4061 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4074 struct cnic_dev *dev = data;
4078 cnic_cm_process_kcqe(dev, kcqe[i]);
4085 static void cnic_cm_free_mem(struct cnic_dev *dev)
4087 struct cnic_local *cp = dev->cnic_priv;
4094 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4096 struct cnic_local *cp = dev->cnic_priv;
4112 cnic_cm_free_mem(dev);
4146 struct cnic_dev *dev = csk->dev;
4147 struct cnic_local *cp = dev->cnic_priv;
4160 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4164 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4169 cnic_ctx_wr(dev, 45, 0, seed);
4175 struct cnic_dev *dev = csk->dev;
4176 struct cnic_local *cp = dev->cnic_priv;
4205 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4214 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4216 struct cnic_local *cp = dev->cnic_priv;
4221 if (!netif_running(dev->netdev))
4224 cnic_bnx2x_delete_wait(dev, 0);
4230 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4234 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4236 struct bnx2x *bp = netdev_priv(dev->netdev);
4240 cnic_init_bnx2x_mac(dev);
4241 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4243 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4246 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4248 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4252 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4254 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4256 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4258 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4261 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4269 struct cnic_dev *dev;
4274 dev = cp->dev;
4283 cp->ethdev->drv_ctl(dev->netdev, &info);
4302 err = cnic_bnx2x_destroy_ramrod(dev, i);
4304 cnic_free_bnx2x_conn_resc(dev, i);
4319 static int cnic_cm_open(struct cnic_dev *dev)
4321 struct cnic_local *cp = dev->cnic_priv;
4324 err = cnic_cm_alloc_mem(dev);
4328 err = cp->start_cm(dev);
4335 dev->cm_create = cnic_cm_create;
4336 dev->cm_destroy = cnic_cm_destroy;
4337 dev->cm_connect = cnic_cm_connect;
4338 dev->cm_abort = cnic_cm_abort;
4339 dev->cm_close = cnic_cm_close;
4340 dev->cm_select_dev = cnic_cm_select_dev;
4342 cp->ulp_handle[CNIC_ULP_L4] = dev;
4347 cnic_cm_free_mem(dev);
4351 static int cnic_cm_shutdown(struct cnic_dev *dev)
4353 struct cnic_local *cp = dev->cnic_priv;
4365 cnic_cm_free_mem(dev);
4370 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4378 cnic_ctx_wr(dev, cid_addr, i, 0);
4381 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4383 struct cnic_local *cp = dev->cnic_priv;
4397 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4399 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4401 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4405 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4418 static void cnic_free_irq(struct cnic_dev *dev)
4420 struct cnic_local *cp = dev->cnic_priv;
4424 cp->disable_int_sync(dev);
4426 free_irq(ethdev->irq_arr[0].vector, dev);
4430 static int cnic_request_irq(struct cnic_dev *dev)
4432 struct cnic_local *cp = dev->cnic_priv;
4436 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4443 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4445 struct cnic_local *cp = dev->cnic_priv;
4454 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4456 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4457 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4458 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4462 err = cnic_request_irq(dev);
4468 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4475 cnic_free_irq(dev);
4481 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4485 CNIC_WR(dev, BNX2_HC_COMMAND,
4498 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4502 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4504 struct cnic_local *cp = dev->cnic_priv;
4510 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4514 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4516 struct cnic_local *cp = dev->cnic_priv;
4522 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4524 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4528 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4530 struct cnic_local *cp = dev->cnic_priv;
4547 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4558 cnic_ctx_wr(dev, cid_addr2, i, 0);
4565 cnic_init_context(dev, tx_cid);
4566 cnic_init_context(dev, tx_cid + 1);
4574 cnic_ctx_wr(dev, cid_addr, offset0, val);
4577 cnic_ctx_wr(dev, cid_addr, offset1, val);
4587 cnic_ctx_wr(dev, cid_addr, offset2, val);
4591 cnic_ctx_wr(dev, cid_addr, offset3, val);
4595 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4597 struct cnic_local *cp = dev->cnic_priv;
4607 cnic_init_context(dev, 2);
4610 coal_val = CNIC_RD(dev, coal_reg);
4620 CNIC_WR(dev, coal_reg, coal_val);
4630 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4636 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4650 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4654 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4657 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4658 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4661 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4670 dev->submit_kwqes(dev, wqes, 1);
4673 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4675 struct cnic_local *cp = dev->cnic_priv;
4680 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4682 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4684 dev->mac_addr[0] = (u8) (val >> 8);
4685 dev->mac_addr[1] = (u8) val;
4687 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4689 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4691 dev->mac_addr[2] = (u8) (val >> 24);
4692 dev->mac_addr[3] = (u8) (val >> 16);
4693 dev->mac_addr[4] = (u8) (val >> 8);
4694 dev->mac_addr[5] = (u8) val;
4696 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4702 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4703 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4704 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4707 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4709 struct cnic_local *cp = dev->cnic_priv;
4715 cnic_set_bnx2_mac(dev);
4717 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4724 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4726 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4727 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4728 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4730 err = cnic_setup_5709_context(dev, 1);
4734 cnic_init_context(dev, KWQ_CID);
4735 cnic_init_context(dev, KCQ_CID);
4753 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4756 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4759 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4762 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4765 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4779 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4782 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4785 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4788 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4791 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4804 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4805 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4810 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4815 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4816 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4819 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4824 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4825 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4827 cnic_init_bnx2_tx_ring(dev);
4828 cnic_init_bnx2_rx_ring(dev);
4830 err = cnic_init_bnx2_irq(dev);
4832 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4833 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4834 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4843 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4845 struct cnic_local *cp = dev->cnic_priv;
4860 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4864 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4866 struct cnic_local *cp = dev->cnic_priv;
4872 err = cnic_request_irq(dev);
4877 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4881 struct bnx2x *bp = netdev_priv(dev->netdev);
4888 u16 flags = CNIC_RD16(dev, addr);
4893 CNIC_WR16(dev, addr, flags);
4896 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4898 struct cnic_local *cp = dev->cnic_priv;
4899 struct bnx2x *bp = netdev_priv(dev->netdev);
4902 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4907 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4910 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4914 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4917 struct cnic_local *cp = dev->cnic_priv;
4918 struct bnx2x *bp = netdev_priv(dev->netdev);
4980 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4983 struct cnic_local *cp = dev->cnic_priv;
4984 struct bnx2x *bp = netdev_priv(dev->netdev);
5048 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5050 struct cnic_local *cp = dev->cnic_priv;
5051 struct bnx2x *bp = netdev_priv(dev->netdev);
5087 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5089 struct cnic_local *cp = dev->cnic_priv;
5090 struct bnx2x *bp = netdev_priv(dev->netdev);
5095 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5107 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5116 cnic_init_bnx2x_kcq(dev);
5119 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5120 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5122 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5125 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5128 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5131 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5134 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5136 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5138 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5142 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5145 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5149 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5152 cnic_setup_bnx2x_context(dev);
5154 ret = cnic_init_bnx2x_irq(dev);
5162 static void cnic_init_rings(struct cnic_dev *dev)
5164 struct cnic_local *cp = dev->cnic_priv;
5165 struct bnx2x *bp = netdev_priv(dev->netdev);
5171 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5172 cnic_init_bnx2_tx_ring(dev);
5173 cnic_init_bnx2_rx_ring(dev);
5175 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5196 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5205 cnic_init_bnx2x_tx_ring(dev, data);
5206 cnic_init_bnx2x_rx_ring(dev, data);
5215 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5224 netdev_err(dev->netdev,
5226 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5227 cnic_ring_ctl(dev, cid, cli, 1);
5234 static void cnic_shutdown_rings(struct cnic_dev *dev)
5236 struct cnic_local *cp = dev->cnic_priv;
5243 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5244 cnic_shutdown_bnx2_rx_ring(dev);
5245 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5251 cnic_ring_ctl(dev, cid, cli, 0);
5257 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5265 netdev_err(dev->netdev,
5267 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5270 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5279 static int cnic_register_netdev(struct cnic_dev *dev)
5281 struct cnic_local *cp = dev->cnic_priv;
5291 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5293 netdev_err(dev->netdev, "register_cnic failed\n");
5298 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5300 dev->max_iscsi_conn = 0;
5305 static void cnic_unregister_netdev(struct cnic_dev *dev)
5307 struct cnic_local *cp = dev->cnic_priv;
5313 ethdev->drv_unregister_cnic(dev->netdev);
5316 static int cnic_start_hw(struct cnic_dev *dev)
5318 struct cnic_local *cp = dev->cnic_priv;
5322 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5325 dev->regview = ethdev->io_base;
5326 pci_dev_get(dev->pcidev);
5327 cp->func = PCI_FUNC(dev->pcidev->devfn);
5331 err = cp->alloc_resc(dev);
5333 netdev_err(dev->netdev, "allocate resource failure\n");
5337 err = cp->start_hw(dev);
5341 err = cnic_cm_open(dev);
5345 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5347 cp->enable_int(dev);
5353 cp->stop_hw(dev);
5355 cp->free_resc(dev);
5356 pci_dev_put(dev->pcidev);
5360 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5362 cnic_disable_bnx2_int_sync(dev);
5364 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5365 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5367 cnic_init_context(dev, KWQ_CID);
5368 cnic_init_context(dev, KCQ_CID);
5370 cnic_setup_5709_context(dev, 0);
5371 cnic_free_irq(dev);
5373 cnic_free_resc(dev);
5377 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5379 struct cnic_local *cp = dev->cnic_priv;
5380 struct bnx2x *bp = netdev_priv(dev->netdev);
5385 cnic_free_irq(dev);
5398 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5399 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5403 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5405 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5406 cnic_free_resc(dev);
5409 static void cnic_stop_hw(struct cnic_dev *dev)
5411 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5412 struct cnic_local *cp = dev->cnic_priv;
5422 cnic_shutdown_rings(dev);
5423 cp->stop_cm(dev);
5425 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5428 cnic_cm_shutdown(dev);
5429 cp->stop_hw(dev);
5430 pci_dev_put(dev->pcidev);
5434 static void cnic_free_dev(struct cnic_dev *dev)
5438 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5442 if (atomic_read(&dev->ref_count) != 0)
5443 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5445 netdev_info(dev->netdev, "Removed CNIC device\n");
5446 dev_put(dev->netdev);
5447 kfree(dev);
5450 static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
5453 struct cnic_local *cp = dev->cnic_priv;
5454 struct bnx2x *bp = netdev_priv(dev->netdev);
5457 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
5463 ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
5467 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5480 cdev->netdev = dev;
5489 cp->dev = cdev;
5495 netdev_info(dev, "Added CNIC device\n");
5500 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5505 struct bnx2 *bp = netdev_priv(dev);
5509 ethdev = (bp->cnic_probe)(dev);
5518 dev_hold(dev);
5528 cdev = cnic_alloc_dev(dev, pdev);
5556 dev_put(dev);
5560 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5565 struct bnx2x *bp = netdev_priv(dev);
5569 ethdev = bp->cnic_probe(dev);
5578 dev_hold(dev);
5579 cdev = cnic_alloc_dev(dev, pdev);
5581 dev_put(dev);
5628 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5633 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5635 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5638 cdev = init_bnx2_cnic(dev);
5640 cdev = init_bnx2x_cnic(dev);
5683 struct cnic_dev *dev;
5686 dev = cnic_from_netdev(netdev);
5688 if (!dev && event == NETDEV_REGISTER) {
5690 dev = is_cnic_dev(netdev);
5691 if (dev) {
5693 cnic_hold(dev);
5696 if (dev) {
5697 struct cnic_local *cp = dev->cnic_priv;
5700 cnic_ulp_init(dev);
5702 cnic_ulp_exit(dev);
5705 if (cnic_register_netdev(dev) != 0) {
5706 cnic_put(dev);
5709 if (!cnic_start_hw(dev))
5710 cnic_ulp_start(dev);
5716 cnic_ulp_stop(dev);
5717 cnic_stop_hw(dev);
5718 cnic_unregister_netdev(dev);
5721 list_del_init(&dev->list);
5724 cnic_put(dev);
5725 cnic_free_dev(dev);
5728 cnic_put(dev);
5735 dev = cnic_from_netdev(realdev);
5736 if (dev) {
5738 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5739 cnic_put(dev);
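
For context, a minimal illustrative sketch (not one of the matching lines above) of the lookup-and-refcount pattern visible around source lines 142-150, 3739-3744, and 5683-5728. It is written as it would sit inside cnic.c itself, since cnic_from_netdev() is file-local there; the function name example_lookup and the comment wording are mine, and the assumption that cnic_from_netdev() returns a reference already held via cnic_hold() is inferred from the cnic_put() calls at lines 3744 and 5728.

static void example_lookup(struct net_device *netdev)
{
	/* Assumed: cnic_from_netdev() returns the matching cnic_dev with its
	 * ref_count already incremented (see cnic_hold() at line 142).
	 */
	struct cnic_dev *dev = cnic_from_netdev(netdev);

	if (!dev)
		return;

	/* ... safe to use dev and dev->cnic_priv while the reference is held ... */

	cnic_put(dev);	/* drop the reference, mirroring lines 3744 and 5728 */
}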