Lines matching refs: vdev
90 static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
92 static inline int is_vxge_card_up(struct vxgedev *vdev)
94 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
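Fragments 92 and 94 above assemble into a single inline helper. A reconstruction (the brace lines contain no vdev, so they are supplied here; kernel bitops context assumed):

    static inline int is_vxge_card_up(struct vxgedev *vdev)
    {
            /* The card's up/down state lives as a bit in vdev->state;
             * test_bit() gives a cheap, lock-free check for hot paths. */
            return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
    }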
121 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
126 for (i = 0; i < vdev->no_of_vpath; i++)
127 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
130 static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
136 for (i = 0; i < vdev->no_of_vpath; i++) {
137 ring = &vdev->vpaths[i].ring;
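Fragments 121–137 are the two completion walkers. A sketch; the RX loop body is not matched by this search, so the per-ring poll call is an assumption:

    static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
    {
            int i;

            /* Drain completed transmits on every virtual path's fifo. */
            for (i = 0; i < vdev->no_of_vpath; i++)
                    VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
    }

    static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
    {
            int i;
            struct vxge_ring *ring;

            for (i = 0; i < vdev->no_of_vpath; i++) {
                    ring = &vdev->vpaths[i].ring;
                    /* Assumed body: poll this ring for received frames. */
                    vxge_hw_vpath_poll_rx(ring->handle);
            }
    }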
151 struct vxgedev *vdev = netdev_priv(dev);
154 vdev->ndev->name, __func__, __LINE__);
155 netdev_notice(vdev->ndev, "Link Up\n");
156 vdev->stats.link_up++;
158 netif_carrier_on(vdev->ndev);
159 netif_tx_wake_all_queues(vdev->ndev);
162 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
174 struct vxgedev *vdev = netdev_priv(dev);
177 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
178 netdev_notice(vdev->ndev, "Link Down\n");
180 vdev->stats.link_down++;
181 netif_carrier_off(vdev->ndev);
182 netif_tx_stop_all_queues(vdev->ndev);
185 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
632 static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
645 queue_len = vdev->no_of_vpath;
648 vdev->vpath_selector[queue_len - 1];
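Fragments 632–648 belong to the TX_PORT_STEERING hash. A minimal sketch of the idea, assuming the hash input is the TCP source/destination ports and that the transport header is already set on the skb; only the queue_len and vpath_selector lines are confirmed above:

    static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
    {
            u16 queue_len, counter = 0;
            const struct tcphdr *th;

            if (skb->protocol == htons(ETH_P_IP)) {
                    th = tcp_hdr(skb);              /* assumed accessor */
                    queue_len = vdev->no_of_vpath;
                    /* Fold the port pair, then mask with a selector picked
                     * for this queue count so the result is near-uniform. */
                    counter = (ntohs(th->source) + ntohs(th->dest)) &
                              vdev->vpath_selector[queue_len - 1];
                    if (counter >= queue_len)
                            counter = queue_len - 1;
            }
            return counter;
    }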
700 vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
711 vpath = &vdev->vpaths[mac->vpath_no];
725 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
738 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
739 vpath = &vdev->vpaths[vpath_idx];
748 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
749 vpath = &vdev->vpaths[vpath_idx];
754 status = vxge_add_mac_addr(vdev, &mac_info);
765 vpath = &vdev->vpaths[vpath_idx];
775 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
809 struct vxgedev *vdev = NULL;
831 vdev = netdev_priv(dev);
833 if (unlikely(!is_vxge_card_up(vdev))) {
835 "%s: vdev not initialized", dev->name);
840 if (vdev->config.addr_learn_en) {
841 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
851 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
853 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
854 vpath_no = vxge_get_vpath_no(vdev, skb);
858 if (vpath_no >= vdev->no_of_vpath)
861 fifo = &vdev->vpaths[vpath_no].fifo;
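Fragments 809–861 trace vpath selection in the xmit handler. The decision chain, reconstructed (the multiqueue branch's skb_get_queue_mapping() call contains no vdev and is therefore an assumption):

    /* Inside the ndo_start_xmit handler, after the card-up check: */
    if (vdev->config.addr_learn_en)
            /* Steer by the frame's source MAC (starts at byte ETH_ALEN). */
            vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);

    if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
            vpath_no = skb_get_queue_mapping(skb);  /* assumed call */
    else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
            vpath_no = vxge_get_vpath_no(vdev, skb);

    /* Out-of-range selections fall back to vpath 0. */
    if (vpath_no >= vdev->no_of_vpath)
            vpath_no = 0;

    fifo = &vdev->vpaths[vpath_no].fifo;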
1093 vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1098 vpath = &vdev->vpaths[mac->vpath_no];
1124 struct vxgedev *vdev;
1138 vdev = netdev_priv(dev);
1140 if (unlikely(!is_vxge_card_up(vdev)))
1143 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1144 for (i = 0; i < vdev->no_of_vpath; i++) {
1145 vpath = &vdev->vpaths[i];
1151 vdev->all_multi_flg = 1;
1153 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1154 for (i = 0; i < vdev->no_of_vpath; i++) {
1155 vpath = &vdev->vpaths[i];
1161 vdev->all_multi_flg = 0;
1166 if (!vdev->config.addr_learn_en) {
1167 for (i = 0; i < vdev->no_of_vpath; i++) {
1168 vpath = &vdev->vpaths[i];
1186 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1187 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1188 list_head = &vdev->vpaths[0].mac_addr_list;
1190 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1191 vdev->vpaths[0].max_mac_addr_cnt)
1204 vdev->no_of_vpath;
1208 vdev,
1218 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1222 status = vxge_add_mac_addr(vdev, &mac_info);
1235 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1248 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1251 status = vxge_del_mac_addr(vdev, &mac_info);
1256 for (i = 0; i < vdev->no_of_vpath; i++) {
1257 vpath = &vdev->vpaths[i];
1266 vdev->all_multi_flg = 1;
1285 struct vxgedev *vdev;
1292 vdev = netdev_priv(dev);
1311 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1312 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1327 status = vxge_del_mac_addr(vdev, &mac_info_old);
1330 if (unlikely(!is_vxge_card_up(vdev))) {
1336 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1339 status = vxge_add_mac_addr(vdev, &mac_info_new);
1351 * @vdev: pointer to vdev
1356 static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1358 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1365 if (vdev->config.intr_type == INTA)
1384 * @vdev: pointer to vdev
1389 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1391 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1395 hldev = pci_get_drvdata(vdev->pdev);
1401 if (vdev->config.intr_type == INTA)
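Fragments 1356–1401 cover the per-vpath interrupt toggles. A sketch of the disable side; the MSI-X masking details are assumptions, only the hldev lookup (1395) and the INTA test (1401) are confirmed:

    static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
    {
            struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
            struct __vxge_hw_device *hldev;
            int msix_id;

            hldev = pci_get_drvdata(vdev->pdev);

            /* Let in-flight receives finish before masking (assumed call). */
            vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

            vxge_hw_vpath_intr_disable(vpath->handle);

            if (vdev->config.intr_type == INTA) {
                    /* Legacy INTA: mask tx/rx at the vpath level. */
                    vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
            } else {
                    /* MSI-X: mask each of this vpath's vectors
                     * (vector count per vpath is assumed). */
                    for (msix_id = 0; msix_id < 4; msix_id++)
                            vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
            }
    }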
1488 struct vxgedev *vdev = vpath->vdev;
1494 for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1502 * @vdev: pointer to vdev
1507 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1510 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1514 if (unlikely(!is_vxge_card_up(vdev)))
1518 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1523 if (is_vxge_card_up(vdev) &&
1547 if (vdev->all_multi_flg) {
1556 vxge_vpath_intr_enable(vdev, vp_id);
1568 clear_bit(vp_id, &vdev->vp_reset);
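Fragments 1507–1568 outline the single-vpath reset used by the deferred poller. The control flow, condensed (hardware reset/reopen details elided; helper names beyond those matched above are assumptions):

    static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
    {
            struct vxge_vpath *vpath = &vdev->vpaths[vp_id];

            /* Nothing to do if the card is down ... */
            if (unlikely(!is_vxge_card_up(vdev)))
                    return 0;
            /* ... or if a full-card reset already owns every vpath. */
            if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                    return 0;

            if (is_vxge_card_up(vdev) &&
                vxge_hw_vpath_reset(vpath->handle) != VXGE_HW_OK)
                    return -EINVAL;
            /* (Recovery and vpath-reopen steps elided here.) */

            /* An all-multicast setting does not survive the reset. */
            if (vdev->all_multi_flg)
                    vxge_hw_vpath_mcast_enable(vpath->handle);

            vxge_vpath_intr_enable(vdev, vp_id);

            /* Let vxge_poll_vp_reset() schedule this vpath again. */
            clear_bit(vp_id, &vdev->vp_reset);
            return 0;
    }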
1578 static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1583 if (vdev->config.intr_type == MSI_X) {
1584 for (i = 0; i < vdev->no_of_vpath; i++) {
1587 hw_ring = vdev->vpaths[i].ring.handle;
1593 for (i = 0; i < vdev->no_of_vpath; i++) {
1594 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1600 if ((vdev->config.intr_type == INTA) && (i == 0))
1607 static int do_vxge_reset(struct vxgedev *vdev, int event)
1616 if (unlikely(!is_vxge_card_up(vdev)))
1620 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1625 netif_carrier_off(vdev->ndev);
1628 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1629 while (test_bit(vp_id, &vdev->vp_reset))
1633 netif_carrier_on(vdev->ndev);
1636 if (unlikely(vdev->exec_mode)) {
1639 vdev->ndev->name);
1640 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1641 netif_tx_stop_all_queues(vdev->ndev);
1647 vxge_hw_device_wait_receive_idle(vdev->devh);
1648 vxge_hw_device_intr_disable(vdev->devh);
1650 switch (vdev->cric_err_event) {
1652 netif_tx_stop_all_queues(vdev->ndev);
1656 vdev->ndev->name);
1673 netif_tx_stop_all_queues(vdev->ndev);
1677 vdev->ndev->name);
1683 netif_tx_stop_all_queues(vdev->ndev);
1687 vdev->ndev->name);
1695 netif_tx_stop_all_queues(vdev->ndev);
1699 vdev->ndev->name);
1709 netif_tx_stop_all_queues(vdev->ndev);
1712 status = vxge_reset_all_vpaths(vdev);
1716 vdev->ndev->name);
1723 for (i = 0; i < vdev->no_of_vpath; i++)
1724 if (vdev->vpaths[i].handle) {
1726 vdev->vpaths[i].handle)
1746 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1747 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1748 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1752 for (i = 0; i < vdev->no_of_vpath; i++)
1753 vxge_vpath_intr_enable(vdev, i);
1755 vxge_hw_device_intr_enable(vdev->devh);
1760 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1763 for (i = 0; i < vdev->no_of_vpath; i++) {
1764 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1766 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1769 netif_tx_wake_all_queues(vdev->ndev);
1773 vxge_config_ci_for_tti_rti(vdev);
1781 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
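Fragments 1607–1781 are the full-card reset. Condensed to a skeleton: the event-specific branches (the cric_err_event switch at 1650 and the per-event quiesce paths) are collapsed, and error handling is trimmed:

    static int do_vxge_reset(struct vxgedev *vdev, int event)
    {
            enum vxge_hw_status status;
            int i, vp_id, ret = 0;

            if (unlikely(!is_vxge_card_up(vdev)))
                    return 0;
            /* Only one reset at a time may own the card. */
            if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                    return -EBUSY;

            /* Drop carrier while in-flight per-vpath resets drain. */
            netif_carrier_off(vdev->ndev);
            for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++)
                    while (test_bit(vp_id, &vdev->vp_reset))
                            msleep(50);     /* delay value assumed */
            netif_carrier_on(vdev->ndev);

            /* Quiesce, then reset every vpath. */
            netif_tx_stop_all_queues(vdev->ndev);
            vxge_hw_device_wait_receive_idle(vdev->devh);
            vxge_hw_device_intr_disable(vdev->devh);
            status = vxge_reset_all_vpaths(vdev);
            if (status != VXGE_HW_OK)
                    ret = -EPERM;           /* error value assumed */

            /* Rebuild per-vpath state lost across the reset. */
            for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
                    vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
                    vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
            }

            /* Bring the card back up in reverse order of teardown. */
            for (i = 0; i < vdev->no_of_vpath; i++)
                    vxge_vpath_intr_enable(vdev, i);
            vxge_hw_device_intr_enable(vdev->devh);
            set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
            for (i = 0; i < vdev->no_of_vpath; i++) {
                    vxge_hw_vpath_enable(vdev->vpaths[i].handle);
                    vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
            }
            netif_tx_wake_all_queues(vdev->ndev);
            vxge_config_ci_for_tti_rti(vdev);

            clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
            return ret;
    }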
1787 * @vdev: pointer to ll device
1793 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1795 if (!netif_running(vdev->ndev))
1798 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1841 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1847 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1849 for (i = 0; i < vdev->no_of_vpath; i++) {
1850 ring = &vdev->vpaths[i].ring;
1860 VXGE_COMPLETE_ALL_TX(vdev);
1884 struct vxgedev *vdev = netdev_priv(dev);
1885 struct pci_dev *pdev = vdev->pdev;
1898 VXGE_COMPLETE_ALL_RX(vdev);
1899 VXGE_COMPLETE_ALL_TX(vdev);
1909 static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1922 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1924 mtable[index] = index % vdev->no_of_vpath;
1928 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1929 vdev->no_of_vpath,
1931 vdev->config.rth_bkt_sz);
1935 "for vpath:%d", vdev->vpaths[0].device_id);
1940 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1941 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1942 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1943 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1945 vdev->config.rth_hash_type_tcpipv6ex;
1946 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1954 for (index = 0; index < vdev->no_of_vpath; index++) {
1956 vdev->vpaths[index].handle,
1957 vdev->config.rth_algorithm,
1959 vdev->config.rth_bkt_sz);
1963 vdev->vpaths[index].device_id);
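Fragments 1909–1963 split RTH setup into two halves: one shared indirection table, then per-vpath hash enabling. A condensed sketch (table sizes and the itable argument are assumptions; the mtable fill and both hardware calls are confirmed above):

    static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
    {
            enum vxge_hw_status status;
            struct vxge_hw_rth_hash_types hash_types;
            u8 itable[256] = {0};   /* indirection table, size assumed */
            u8 mtable[256] = {0};   /* bucket -> vpath map, size assumed */
            int index;

            /* Bucket i -> vpath (i mod N): 2^rth_bkt_sz buckets spread
             * evenly across the configured vpaths. */
            for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
                    itable[index] = index;
                    mtable[index] = index % vdev->no_of_vpath;
            }

            status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
                                                      vdev->no_of_vpath,
                                                      mtable, itable,
                                                      vdev->config.rth_bkt_sz);
            if (status != VXGE_HW_OK)
                    return status;

            /* Mirror the configured hash types into the hardware's view. */
            hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
            hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
            hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
            hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
            hash_types.hash_type_tcpipv6ex_en =
                            vdev->config.rth_hash_type_tcpipv6ex;
            hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

            /* Enable RTH on every vpath with the same algorithm and size. */
            for (index = 0; index < vdev->no_of_vpath; index++) {
                    status = vxge_hw_vpath_rts_rth_set(
                                    vdev->vpaths[index].handle,
                                    vdev->config.rth_algorithm,
                                    &hash_types,
                                    vdev->config.rth_bkt_sz);
                    if (status != VXGE_HW_OK)
                            return status;
            }
            return status;
    }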
1972 static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1978 for (i = 0; i < vdev->no_of_vpath; i++) {
1979 vpath = &vdev->vpaths[i];
1982 if (is_vxge_card_up(vdev) &&
2004 static void vxge_close_vpaths(struct vxgedev *vdev, int index)
2009 for (i = index; i < vdev->no_of_vpath; i++) {
2010 vpath = &vdev->vpaths[i];
2014 vdev->stats.vpaths_open--;
2022 static int vxge_open_vpaths(struct vxgedev *vdev)
2030 for (i = 0; i < vdev->no_of_vpath; i++) {
2031 vpath = &vdev->vpaths[i];
2034 if (!vdev->titan1) {
2036 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2043 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2044 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2061 vpath->ring.ndev = vdev->ndev;
2062 vpath->ring.pdev = vdev->pdev;
2064 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2071 vdev->config.tx_steering_type;
2072 vpath->fifo.ndev = vdev->ndev;
2073 vpath->fifo.pdev = vdev->pdev;
2078 if (vdev->config.tx_steering_type)
2080 netdev_get_tx_queue(vdev->ndev, i);
2083 netdev_get_tx_queue(vdev->ndev, 0);
2085 vdev->config.fifo_indicate_max_pkts;
2088 vpath->ring.rx_hwts = vdev->rx_hwts;
2090 vdev->vp_handles[i] = vpath->handle;
2091 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2092 vdev->stats.vpaths_open++;
2094 vdev->stats.vpath_open_fail++;
2097 vdev->ndev->name, vpath->device_id,
2099 vxge_close_vpaths(vdev, 0);
2104 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2180 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2184 hldev = pci_get_drvdata(vdev->pdev);
2186 if (pci_channel_offline(vdev->pdev))
2189 if (unlikely(!is_vxge_card_up(vdev)))
2192 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2198 vdev->vpaths_deployed >>
2202 napi_schedule(&vdev->napi);
2263 struct vxgedev *vdev = vpath->vdev;
2267 for (i = 0; i < vdev->no_of_vpath; i++) {
2272 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2273 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2275 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2276 vdev->exec_mode);
2278 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2289 static int vxge_alloc_msix(struct vxgedev *vdev)
2293 vdev->intr_cnt = 0;
2297 vdev->intr_cnt = vdev->no_of_vpath * 2;
2300 vdev->intr_cnt++;
2302 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2304 if (!vdev->entries) {
2312 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2315 if (!vdev->vxge_entries) {
2322 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2327 vdev->entries[j].entry = msix_intr_vect;
2328 vdev->vxge_entries[j].entry = msix_intr_vect;
2329 vdev->vxge_entries[j].in_use = 0;
2333 vdev->entries[j].entry = msix_intr_vect + 1;
2334 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2335 vdev->vxge_entries[j].in_use = 0;
2340 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2341 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2342 vdev->vxge_entries[j].in_use = 0;
2344 ret = pci_enable_msix_range(vdev->pdev,
2345 vdev->entries, 3, vdev->intr_cnt);
2349 } else if (ret < vdev->intr_cnt) {
2350 pci_disable_msix(vdev->pdev);
2354 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2360 kfree(vdev->entries);
2361 kfree(vdev->vxge_entries);
2362 vdev->entries = NULL;
2363 vdev->vxge_entries = NULL;
2366 vxge_close_vpaths(vdev, temp);
2367 vdev->no_of_vpath = temp;
2373 kfree(vdev->vxge_entries);
2375 kfree(vdev->entries);
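Fragments 2289–2367 lay out the MSI-X plan: two vectors per vpath (tx and rx) plus one shared alarm vector, i.e. intr_cnt = 2N + 1. A sketch of the allocation bookkeeping (the per-vpath vector spacing and the retry-with-fewer-vpaths path are simplified away; the 3..intr_cnt range passed to pci_enable_msix_range() is confirmed at 2344–2345):

    static int vxge_alloc_msix(struct vxgedev *vdev)
    {
            int i, j = 0, ret;

            vdev->intr_cnt = vdev->no_of_vpath * 2;  /* tx + rx per vpath */
            vdev->intr_cnt++;                        /* one alarm vector  */

            vdev->entries = kcalloc(vdev->intr_cnt,
                                    sizeof(struct msix_entry), GFP_KERNEL);
            if (!vdev->entries)
                    return -ENOMEM;

            /* Driver-private shadow of each vector's ownership. */
            vdev->vxge_entries = kcalloc(vdev->intr_cnt,
                                         sizeof(struct vxge_msix_entry),
                                         GFP_KERNEL);
            if (!vdev->vxge_entries) {
                    kfree(vdev->entries);
                    vdev->entries = NULL;
                    return -ENOMEM;
            }

            for (i = 0; i < vdev->no_of_vpath; i++) {
                    /* Per-vpath vector spacing is an assumption here. */
                    int msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

                    vdev->entries[j].entry = msix_intr_vect;        /* tx */
                    vdev->vxge_entries[j].entry = msix_intr_vect;
                    vdev->vxge_entries[j].in_use = 0;
                    j++;

                    vdev->entries[j].entry = msix_intr_vect + 1;    /* rx */
                    vdev->vxge_entries[j].entry = msix_intr_vect + 1;
                    vdev->vxge_entries[j].in_use = 0;
                    j++;
            }

            vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;            /* alarm */
            vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
            vdev->vxge_entries[j].in_use = 0;

            /* Accept 3 vectors (one vpath + alarm) up to the full request;
             * a short allocation means closing surplus vpaths (elided). */
            ret = pci_enable_msix_range(vdev->pdev, vdev->entries, 3,
                                        vdev->intr_cnt);
            return ret < 0 ? ret : 0;
    }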
2380 static int vxge_enable_msix(struct vxgedev *vdev)
2387 vdev->intr_cnt = 0;
2390 ret = vxge_alloc_msix(vdev);
2392 for (i = 0; i < vdev->no_of_vpath; i++) {
2393 struct vxge_vpath *vpath = &vdev->vpaths[i];
2412 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2416 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2418 if (vdev->vxge_entries[intr_cnt].in_use) {
2419 synchronize_irq(vdev->entries[intr_cnt].vector);
2420 free_irq(vdev->entries[intr_cnt].vector,
2421 vdev->vxge_entries[intr_cnt].arg);
2422 vdev->vxge_entries[intr_cnt].in_use = 0;
2426 kfree(vdev->entries);
2427 kfree(vdev->vxge_entries);
2428 vdev->entries = NULL;
2429 vdev->vxge_entries = NULL;
2431 if (vdev->config.intr_type == MSI_X)
2432 pci_disable_msix(vdev->pdev);
2435 static void vxge_rem_isr(struct vxgedev *vdev)
2438 vdev->config.intr_type == MSI_X) {
2439 vxge_rem_msix_isr(vdev);
2440 } else if (vdev->config.intr_type == INTA) {
2441 synchronize_irq(vdev->pdev->irq);
2442 free_irq(vdev->pdev->irq, vdev);
2446 static int vxge_add_isr(struct vxgedev *vdev)
2450 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2452 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
2453 ret = vxge_enable_msix(vdev);
2460 vdev->config.intr_type = INTA;
2463 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
2465 intr_idx < (vdev->no_of_vpath *
2473 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2475 vdev->ndev->name,
2476 vdev->entries[intr_cnt].entry,
2479 vdev->entries[intr_cnt].vector,
2481 vdev->desc[intr_cnt],
2482 &vdev->vpaths[vp_idx].fifo);
2483 vdev->vxge_entries[intr_cnt].arg =
2484 &vdev->vpaths[vp_idx].fifo;
2488 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2490 vdev->ndev->name,
2491 vdev->entries[intr_cnt].entry,
2494 vdev->entries[intr_cnt].vector,
2496 vdev->desc[intr_cnt],
2497 &vdev->vpaths[vp_idx].ring);
2498 vdev->vxge_entries[intr_cnt].arg =
2499 &vdev->vpaths[vp_idx].ring;
2507 vdev->ndev->name, intr_cnt);
2508 vxge_rem_msix_isr(vdev);
2509 vdev->config.intr_type = INTA;
2512 vdev->ndev->name);
2518 vdev->vxge_entries[intr_cnt].in_use = 1;
2519 msix_idx += vdev->vpaths[vp_idx].device_id *
2522 vdev->vpaths[vp_idx].handle,
2529 (vp_idx < (vdev->no_of_vpath - 1)))
2533 intr_cnt = vdev->no_of_vpath * 2;
2534 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2536 vdev->ndev->name,
2537 vdev->entries[intr_cnt].entry,
2540 ret = request_irq(vdev->entries[intr_cnt].vector,
2542 vdev->desc[intr_cnt],
2543 &vdev->vpaths[0]);
2547 vdev->ndev->name, intr_cnt);
2548 vxge_rem_msix_isr(vdev);
2549 vdev->config.intr_type = INTA;
2552 vdev->ndev->name);
2556 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2558 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2560 vdev->vxge_entries[intr_cnt].in_use = 1;
2561 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2565 if (vdev->config.intr_type == INTA) {
2566 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2567 "%s:vxge:INTA", vdev->ndev->name);
2568 vxge_hw_device_set_intr_type(vdev->devh,
2571 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2573 ret = request_irq((int) vdev->pdev->irq,
2575 IRQF_SHARED, vdev->desc[0], vdev);
2579 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2584 "IRQ", vdev->pdev->irq);
2592 struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
2595 for (i = 0; i < vdev->no_of_vpath; i++) {
2596 if (test_bit(i, &vdev->vp_reset)) {
2597 vxge_reset_vpath(vdev, i);
2601 if (j && (vdev->config.intr_type != MSI_X)) {
2602 vxge_hw_device_unmask_all(vdev->devh);
2603 vxge_hw_device_flush_io(vdev->devh);
2606 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
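Fragments 2592–2606 are nearly the whole deferred-reset poller; only its braces and locals are missing. Reconstructed:

    static void vxge_poll_vp_reset(struct timer_list *t)
    {
            struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
            int i, j = 0;

            /* Reset every vpath whose bit was set by the error paths. */
            for (i = 0; i < vdev->no_of_vpath; i++) {
                    if (test_bit(i, &vdev->vp_reset)) {
                            vxge_reset_vpath(vdev, i);
                            j++;
                    }
            }

            /* Under INTA/MSI the device was left masked; unmask and flush
             * once any vpath was actually reset. */
            if (j && (vdev->config.intr_type != MSI_X)) {
                    vxge_hw_device_unmask_all(vdev->devh);
                    vxge_hw_device_flush_io(vdev->devh);
            }

            /* Re-arm: this poller runs twice a second (2606). */
            mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
    }

The companion lockup poller at 2611–2650 runs far more often (HZ / 1000) and flags a hung ring by setting its bit in vdev->vp_reset and disabling that vpath's interrupts, leaving the actual reset to the poller above.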
2611 struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
2618 for (i = 0; i < vdev->no_of_vpath; i++) {
2619 ring = &vdev->vpaths[i].ring;
2633 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2634 vpath = &vdev->vpaths[i];
2637 vxge_vpath_intr_disable(vdev, i);
2650 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2670 struct vxgedev *vdev = netdev_priv(dev);
2678 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2679 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2681 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2701 struct vxgedev *vdev;
2711 vdev = netdev_priv(dev);
2712 hldev = pci_get_drvdata(vdev->pdev);
2719 status = vxge_open_vpaths(vdev);
2722 "%s: fatal: Vpath open failed", vdev->ndev->name);
2727 vdev->mtu = dev->mtu;
2729 status = vxge_add_isr(vdev);
2737 if (vdev->config.intr_type != MSI_X) {
2738 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2739 vdev->config.napi_weight);
2740 napi_enable(&vdev->napi);
2741 for (i = 0; i < vdev->no_of_vpath; i++) {
2742 vpath = &vdev->vpaths[i];
2743 vpath->ring.napi_p = &vdev->napi;
2746 for (i = 0; i < vdev->no_of_vpath; i++) {
2747 vpath = &vdev->vpaths[i];
2749 vxge_poll_msix, vdev->config.napi_weight);
2756 if (vdev->config.rth_steering) {
2757 status = vxge_rth_configure(vdev);
2769 for (i = 0; i < vdev->no_of_vpath; i++) {
2770 vpath = &vdev->vpaths[i];
2773 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2782 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2783 vxge_debug_init(vdev->level_trace,
2784 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2785 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2790 if (vdev->all_multi_flg) {
2791 for (i = 0; i < vdev->no_of_vpath; i++) {
2792 vpath = &vdev->vpaths[i];
2812 vxge_hw_mgmt_reg_write(vdev->devh,
2819 vxge_hw_mgmt_reg_write(vdev->devh,
2829 for (i = 0; i < vdev->no_of_vpath; i++) {
2830 vpath = &vdev->vpaths[i];
2836 if (vdev->config.addr_learn_en) {
2845 vxge_hw_device_setpause_data(vdev->devh, 0,
2846 vdev->config.tx_pause_enable,
2847 vdev->config.rx_pause_enable);
2849 if (vdev->vp_reset_timer.function == NULL)
2850 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
2854 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2855 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
2858 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2862 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2863 netif_carrier_on(vdev->ndev);
2864 netdev_notice(vdev->ndev, "Link Up\n");
2865 vdev->stats.link_up++;
2868 vxge_hw_device_intr_enable(vdev->devh);
2872 for (i = 0; i < vdev->no_of_vpath; i++) {
2873 vpath = &vdev->vpaths[i];
2880 netif_tx_start_all_queues(vdev->ndev);
2883 vxge_config_ci_for_tti_rti(vdev);
2888 vxge_rem_isr(vdev);
2891 if (vdev->config.intr_type != MSI_X)
2892 napi_disable(&vdev->napi);
2894 for (i = 0; i < vdev->no_of_vpath; i++)
2895 napi_disable(&vdev->vpaths[i].ring.napi);
2899 vxge_close_vpaths(vdev, 0);
2921 static void vxge_napi_del_all(struct vxgedev *vdev)
2924 if (vdev->config.intr_type != MSI_X)
2925 netif_napi_del(&vdev->napi);
2927 for (i = 0; i < vdev->no_of_vpath; i++)
2928 netif_napi_del(&vdev->vpaths[i].ring.napi);
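Fragments 2921–2928 cover NAPI teardown; only the else framing is supplied here:

    static void vxge_napi_del_all(struct vxgedev *vdev)
    {
            int i;

            /* One shared NAPI context for INTA/MSI, one per ring for MSI-X. */
            if (vdev->config.intr_type != MSI_X)
                    netif_napi_del(&vdev->napi);
            else
                    for (i = 0; i < vdev->no_of_vpath; i++)
                            netif_napi_del(&vdev->vpaths[i].ring.napi);
    }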
2935 struct vxgedev *vdev;
2942 vdev = netdev_priv(dev);
2943 hldev = pci_get_drvdata(vdev->pdev);
2945 if (unlikely(!is_vxge_card_up(vdev)))
2950 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2955 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2956 status = vxge_hw_mgmt_reg_read(vdev->devh,
2965 status = vxge_hw_mgmt_reg_write(vdev->devh,
2975 vxge_hw_mgmt_reg_write(vdev->devh,
2982 vxge_hw_mgmt_reg_write(vdev->devh,
2992 if (vdev->titan1)
2993 del_timer_sync(&vdev->vp_lockup_timer);
2995 del_timer_sync(&vdev->vp_reset_timer);
3000 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3003 if (vdev->config.intr_type != MSI_X)
3004 napi_disable(&vdev->napi);
3006 for (i = 0; i < vdev->no_of_vpath; i++)
3007 napi_disable(&vdev->vpaths[i].ring.napi);
3010 netif_carrier_off(vdev->ndev);
3011 netdev_notice(vdev->ndev, "Link Down\n");
3012 netif_tx_stop_all_queues(vdev->ndev);
3016 vxge_hw_device_intr_disable(vdev->devh);
3018 vxge_rem_isr(vdev);
3020 vxge_napi_del_all(vdev);
3023 vxge_reset_all_vpaths(vdev);
3025 vxge_close_vpaths(vdev, 0);
3030 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3062 struct vxgedev *vdev = netdev_priv(dev);
3064 vxge_debug_entryexit(vdev->level_trace,
3068 if (unlikely(!is_vxge_card_up(vdev))) {
3071 vxge_debug_init(vdev->level_err,
3076 vxge_debug_init(vdev->level_trace,
3083 vdev->mtu = new_mtu;
3088 vxge_debug_init(vdev->level_trace,
3089 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3091 vxge_debug_entryexit(vdev->level_trace,
3106 struct vxgedev *vdev = netdev_priv(dev);
3110 for (k = 0; k < vdev->no_of_vpath; k++) {
3111 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3112 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3169 static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
3192 vdev->rx_hwts = 0;
3211 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3214 vdev->rx_hwts = 1;
3222 for (i = 0; i < vdev->no_of_vpath; i++)
3223 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3231 static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
3237 config.rx_filter = (vdev->rx_hwts ?
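Fragments 3169–3237 are the hardware-timestamp ioctl pair dispatched at 3262–3264. The get side is small enough to reconstruct (the flags and tx_type values are assumptions; the rx_filter ternary is confirmed at 3237):

    static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
    {
            struct hwtstamp_config config;

            config.flags = 0;
            config.tx_type = HWTSTAMP_TX_OFF;       /* no tx stamping */
            config.rx_filter = (vdev->rx_hwts ?
                                HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

            /* Hand the answer back to the ioctl caller. */
            if (copy_to_user(data, &config, sizeof(config)))
                    return -EFAULT;

            return 0;
    }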
3258 struct vxgedev *vdev = netdev_priv(dev);
3262 return vxge_hwtstamp_set(vdev, rq->ifr_data);
3264 return vxge_hwtstamp_get(vdev, rq->ifr_data);
3281 struct vxgedev *vdev;
3285 vdev = netdev_priv(dev);
3287 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3289 schedule_work(&vdev->reset_task);
3305 struct vxgedev *vdev = netdev_priv(dev);
3310 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3311 vpath = &vdev->vpaths[vp_id];
3316 set_bit(vid, vdev->active_vlans);
3331 struct vxgedev *vdev = netdev_priv(dev);
3338 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3339 vpath = &vdev->vpaths[vp_id];
3346 clear_bit(vid, vdev->active_vlans);
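Fragments 3305–3346 show VLAN filter changes fanning out to every open vpath and mirroring into a local bitmap. A sketch of the add side (the per-vpath hardware call and the is_open test are assumptions):

    static int vxge_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
    {
            struct vxgedev *vdev = netdev_priv(dev);
            struct vxge_vpath *vpath;
            int vp_id;

            /* Program the VID into each open vpath's hardware table. */
            for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
                    vpath = &vdev->vpaths[vp_id];
                    if (!vpath->is_open)
                            continue;
                    vxge_hw_vpath_vid_add(vpath->handle, vid);
            }

            /* Mirror into active_vlans so vxge_restore_vpath_vid_table()
             * (cf. 1494) can replay the set after a vpath reset. */
            set_bit(vid, vdev->active_vlans);
            return 0;
    }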
3376 struct vxgedev *vdev;
3399 vdev = netdev_priv(ndev);
3400 memset(vdev, 0, sizeof(struct vxgedev));
3402 vdev->ndev = ndev;
3403 vdev->devh = hldev;
3404 vdev->pdev = hldev->pdev;
3405 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3406 vdev->rx_hwts = 0;
3407 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3409 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3415 if (vdev->config.rth_steering != NO_STEERING)
3425 INIT_WORK(&vdev->reset_task, vxge_reset);
3430 vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
3432 if (!vdev->vpaths) {
3435 vdev->ndev->name);
3475 *vdev_out = vdev;
3497 kfree(vdev->vpaths);
3511 struct vxgedev *vdev;
3516 vdev = netdev_priv(dev);
3518 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3523 flush_work(&vdev->reset_task);
3528 kfree(vdev->vpaths);
3530 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3532 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3550 struct vxgedev *vdev = netdev_priv(dev);
3554 vxge_debug_entryexit(vdev->level_trace,
3555 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3560 vdev->cric_err_event = type;
3562 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3563 vpath = &vdev->vpaths[vpath_idx];
3568 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3571 "%s: Slot is frozen", vdev->ndev->name);
3575 vdev->ndev->name);
3579 vdev->ndev->name);
3584 if (unlikely(vdev->exec_mode))
3585 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3588 if (unlikely(vdev->exec_mode))
3589 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3593 if (unlikely(vdev->exec_mode))
3594 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3597 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3600 vxge_vpath_intr_disable(vdev, vpath_idx);
3608 vxge_debug_entryexit(vdev->level_trace,
3610 vdev->ndev->name, __func__, __LINE__);
3912 static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3918 vdev->ndev->name, vdev->no_of_vpath);
3920 switch (vdev->config.intr_type) {
3923 "%s: Interrupt type INTA", vdev->ndev->name);
3928 "%s: Interrupt type MSI-X", vdev->ndev->name);
3932 if (vdev->config.rth_steering) {
3935 vdev->ndev->name);
3938 "%s: RTH steering disabled", vdev->ndev->name);
3941 switch (vdev->config.tx_steering_type) {
3944 "%s: Tx steering disabled", vdev->ndev->name);
3949 vdev->ndev->name);
3951 "%s: Tx steering disabled", vdev->ndev->name);
3952 vdev->config.tx_steering_type = 0;
3957 vdev->ndev->name);
3959 "%s: Tx steering disabled", vdev->ndev->name);
3960 vdev->config.tx_steering_type = 0;
3965 vdev->ndev->name);
3970 vdev->ndev->name);
3975 vdev->ndev->name);
3977 "%s: Tx steering disabled", vdev->ndev->name);
3978 vdev->config.tx_steering_type = 0;
3981 if (vdev->config.addr_learn_en)
3983 "%s: MAC Address learning enabled", vdev->ndev->name);
3989 "%s: MTU size - %d", vdev->ndev->name,
3990 ((vdev->devh))->
3993 "%s: VLAN tag stripping %s", vdev->ndev->name,
3994 ((vdev->devh))->
3998 "%s: Max frags : %d", vdev->ndev->name,
3999 ((vdev->devh))->
4067 struct vxgedev *vdev = netdev_priv(netdev);
4075 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4133 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4135 struct __vxge_hw_device *hldev = vdev->devh;
4141 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4168 cmaj = vdev->config.device_hw_info.fw_version.major;
4169 cmin = vdev->config.device_hw_info.fw_version.minor;
4170 cbld = vdev->config.device_hw_info.fw_version.build;
4202 static int vxge_probe_fw_update(struct vxgedev *vdev)
4208 maj = vdev->config.device_hw_info.fw_version.major;
4209 min = vdev->config.device_hw_info.fw_version.minor;
4210 bld = vdev->config.device_hw_info.fw_version.build;
4238 if (vdev->devh->eprom_versions[i]) {
4248 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4307 struct vxgedev *vdev;
4579 &vdev);
4585 ret = vxge_probe_fw_update(vdev);
4590 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4594 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4595 vdev->bar0 = attr.bar0;
4596 vdev->max_vpath_supported = max_vpath_supported;
4597 vdev->no_of_vpath = no_of_vpath;
4603 if (j >= vdev->no_of_vpath)
4606 vdev->vpaths[j].is_configured = 1;
4607 vdev->vpaths[j].device_id = i;
4608 vdev->vpaths[j].ring.driver_id = j;
4609 vdev->vpaths[j].vdev = vdev;
4610 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4611 memcpy((u8 *)vdev->vpaths[j].macaddr,
4616 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4618 vdev->vpaths[j].mac_addr_cnt = 0;
4619 vdev->vpaths[j].mcast_addr_cnt = 0;
4622 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4623 vdev->max_config_port = max_config_port;
4625 vdev->vlan_tag_strip = vlan_tag_strip;
4628 for (i = 0; i < vdev->no_of_vpath; i++)
4629 vdev->vpath_selector[i] = vpath_selector[i];
4631 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4638 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4641 vdev->ndev->name, ll_config->device_hw_info.part_number);
4644 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4647 vdev->ndev->name, macaddr);
4650 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4653 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4661 "%s: Single Function Mode Enabled", vdev->ndev->name);
4665 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4669 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4673 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4678 vxge_print_parm(vdev, vpath_mask);
4681 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4682 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4685 for (i = 0; i < vdev->no_of_vpath; i++) {
4690 vdev->ndev->name);
4695 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4696 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4697 vdev->vpaths[i].mac_addr_cnt = 1;
4719 if (vdev->config.intr_type == INTA)
4723 vdev->ndev->name, __func__, __LINE__);
4726 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4733 for (i = 0; i < vdev->no_of_vpath; i++)
4734 vxge_free_mac_add_list(&vdev->vpaths[i]);
4763 struct vxgedev *vdev;
4770 vdev = netdev_priv(hldev->ndev);
4772 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4773 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4776 for (i = 0; i < vdev->no_of_vpath; i++)
4777 vxge_free_mac_add_list(&vdev->vpaths[i]);
4782 iounmap(vdev->bar0);
4788 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4790 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,