Lines matching references to ss in the myri10ge driver (drivers/net/ethernet/myricom/myri10ge/myri10ge.c). Each entry below gives the source line number followed by the matching line; ss is a struct myri10ge_slice_state, the driver's per-slice (per-queue) state.
197 struct myri10ge_slice_state *ss;
915 struct myri10ge_slice_state *ss;
943 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
1000 ss = &mgp->ss[i];
1001 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
1002 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
1011 ss = &mgp->ss[i];
1012 ss->irq_claim =
1032 ss = &mgp->ss[i];
1034 ss->dca_tag = (__iomem __be32 *)
1037 ss->dca_tag = NULL;
1046 ss = &mgp->ss[i];
1048 memset(ss->rx_done.entry, 0, bytes);
1049 ss->tx.req = 0;
1050 ss->tx.done = 0;
1051 ss->tx.pkt_start = 0;
1052 ss->tx.pkt_done = 0;
1053 ss->rx_big.cnt = 0;
1054 ss->rx_small.cnt = 0;
1055 ss->rx_done.idx = 0;
1056 ss->rx_done.cnt = 0;
1057 ss->tx.wake_queue = 0;
1058 ss->tx.stop_queue = 0;
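The reset path above (source lines 943-1058) first tells the firmware where each slice's receive-completion ring lives, passing the 64-bit DMA bus address as two 32-bit command words (lines 1001-1002), then zeroes every free-running ring counter so host and firmware restart in agreement. A minimal, self-contained illustration of the address split; the wrapper names are hypothetical stand-ins for the driver's macros:

    #include <stdint.h>

    /* Illustrative stand-ins for the MYRI10GE_LOWPART_TO_U32 /
     * MYRI10GE_HIGHPART_TO_U32 macros used at source lines 1001-1002:
     * a 64-bit bus address is carried in two 32-bit command fields. */
    static uint32_t lowpart_to_u32(uint64_t bus)  { return (uint32_t)bus; }
    static uint32_t highpart_to_u32(uint64_t bus) { return (uint32_t)(bus >> 32); }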
1085 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1087 ss->cached_dca_tag = tag;
1088 put_be32(htonl(tag), ss->dca_tag);
1091 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1096 if (cpu != ss->cpu) {
1097 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
1098 if (ss->cached_dca_tag != tag)
1099 myri10ge_write_dca(ss, cpu, tag);
1100 ss->cpu = cpu;
1110 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1126 mgp->ss[i].cpu = -1;
1127 mgp->ss[i].cached_dca_tag = -1;
1128 myri10ge_update_dca(&mgp->ss[i]);
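myri10ge_update_dca (source lines 1091-1100) avoids an MMIO write on every poll by caching the last tag written: the register is touched only when the polling CPU, and therefore its DCA tag, has actually changed. A self-contained sketch of that pattern, with hypothetical names:

    #include <stdint.h>

    /* Reduction of the tag-caching logic at source lines 1096-1100:
     * skip the expensive register write unless the tag for the
     * current CPU differs from what was last written. */
    struct dca_state {
            int cpu;                 /* CPU seen on the previous poll */
            int cached_tag;          /* tag last written to hardware  */
            volatile uint32_t *reg;  /* stands in for ss->dca_tag     */
    };

    static void update_dca(struct dca_state *s, int cpu, int tag)
    {
            if (cpu == s->cpu)
                    return;          /* same CPU: tag cannot have changed */
            if (tag != s->cached_tag) {
                    *s->reg = (uint32_t)tag;  /* driver: put_be32(htonl(tag), ...) */
                    s->cached_tag = tag;
            }
            s->cpu = cpu;
    }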
1305 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1307 struct myri10ge_priv *mgp = ss->mgp;
1317 rx = &ss->rx_small;
1320 rx = &ss->rx_big;
1329 skb = napi_get_frags(&ss->napi);
1331 ss->stats.rx_dropped++;
1367 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1369 napi_gro_frags(&ss->napi);
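myri10ge_rx_done (source lines 1305-1369) is the receive-completion path: the frame length selects the small or big buffer ring, the receive pages are attached to an skb obtained from napi_get_frags(), the slice index is recorded as the receive queue, and the skb is fed to GRO. A hedged, non-compilable sketch of that shape; the length cutoff against mgp->small_bytes is recalled from the driver rather than shown in the listing, and page-attach and checksum details are elided:

    /* Sketch only -- follows the lines shown above, details elided. */
    rx = (len <= mgp->small_bytes) ? &ss->rx_small : &ss->rx_big;

    skb = napi_get_frags(&ss->napi);
    if (unlikely(skb == NULL)) {
            ss->stats.rx_dropped++;
            return 0;                        /* no skb: count the drop */
    }
    /* ... attach rx pages from `rx` as frags, set length and csum ... */
    skb_record_rx_queue(skb, ss - &mgp->ss[0]);   /* slice == rx queue */
    napi_gro_frags(&ss->napi);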
1375 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1377 struct pci_dev *pdev = ss->mgp->pdev;
1378 struct myri10ge_tx_buf *tx = &ss->tx;
1397 ss->stats.tx_bytes += skb->len;
1398 ss->stats.tx_packets++;
1414 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
1424 if ((ss->mgp->dev->real_num_tx_queues > 1) &&
1437 ss->mgp->running == MYRI10GE_ETH_RUNNING) {
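myri10ge_tx_done (source lines 1375-1437) reclaims completed sends and decides whether to wake the stopped netdev queue. Note at source line 1414 how it recovers its queue number by pointer arithmetic, ss - ss->mgp->ss; the same idiom recurs at source lines 1939 and 3338. A tiny self-contained illustration with hypothetical types:

    #include <assert.h>

    /* Pointer subtraction on array elements yields the element index,
     * which the driver uses as the slice/queue number. */
    struct slice { int pad; };
    struct priv  { struct slice ss[4]; };

    static int slice_index(const struct priv *p, const struct slice *s)
    {
            return (int)(s - p->ss);        /* element count, not bytes */
    }

    int main(void)
    {
            struct priv p;
            assert(slice_index(&p, &p.ss[2]) == 2);
            return 0;
    }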
1444 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1446 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1447 struct myri10ge_priv *mgp = ss->mgp;
1461 rx_ok = myri10ge_rx_done(ss, length, checksum);
1470 ss->stats.rx_packets += rx_packets;
1471 ss->stats.rx_bytes += rx_bytes;
1474 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1475 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1477 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
1478 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
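myri10ge_clean_rx_done (source lines 1444-1478) drains up to `budget` completion events, then refills whichever receive ring has fallen below myri10ge_fill_thresh. Because fill_cnt and cnt are free-running counters, their difference is the number of buffers still posted. A minimal sketch; the driver's fields may differ in signedness, unsigned is used here to make the wraparound property explicit:

    #include <stdint.h>

    /* The refill test at source lines 1474-1477 in miniature: refill
     * when fewer than `thresh` posted buffers remain.  Unsigned
     * subtraction keeps the difference correct across wraparound. */
    static int needs_refill(uint32_t fill_cnt, uint32_t cnt, uint32_t thresh)
    {
            return (uint32_t)(fill_cnt - cnt) < thresh;
    }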
1485 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1520 struct myri10ge_slice_state *ss =
1525 if (ss->mgp->dca_enabled)
1526 myri10ge_update_dca(ss);
1529 work_done = myri10ge_clean_rx_done(ss, budget);
1533 put_be32(htonl(3), ss->irq_claim);
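myri10ge_poll (source lines 1520-1533) is the NAPI handler: refresh the DCA tag if enabled, clean up to `budget` events, and if the budget was not exhausted, complete NAPI and re-arm the firmware interrupt by writing the claim register (source line 1533). A hedged sketch; the napi_complete_done() call is the usual NAPI contract and is assumed, not visible in the listing:

    /* Sketch of the poll shape; only the marked lines appear above. */
    if (ss->mgp->dca_enabled)
            myri10ge_update_dca(ss);

    work_done = myri10ge_clean_rx_done(ss, budget);

    if (work_done < budget) {
            napi_complete_done(&ss->napi, work_done);   /* assumed */
            put_be32(htonl(3), ss->irq_claim);          /* re-arm irq */
    }
    return work_done;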
1540 struct myri10ge_slice_state *ss = arg;
1541 struct myri10ge_priv *mgp = ss->mgp;
1542 struct mcp_irq_data *stats = ss->fw_stats;
1543 struct myri10ge_tx_buf *tx = &ss->tx;
1549 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1550 napi_schedule(&ss->napi);
1561 napi_schedule(&ss->napi);
1578 myri10ge_tx_done(ss, (int)send_done_count);
1591 if (ss == mgp->ss)
1594 put_be32(htonl(3), ss->irq_claim + 1);
1705 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1706 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1708 ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
1786 struct myri10ge_slice_state *ss;
1807 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1813 ss = &mgp->ss[0];
1814 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1815 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1817 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
1818 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
1819 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
1820 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
1821 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
1823 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
1824 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
1825 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
1826 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1827 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1830 ss = &mgp->ss[slice];
1832 data[i++] = (unsigned int)ss->tx.pkt_start;
1833 data[i++] = (unsigned int)ss->tx.pkt_done;
1834 data[i++] = (unsigned int)ss->tx.req;
1835 data[i++] = (unsigned int)ss->tx.done;
1836 data[i++] = (unsigned int)ss->rx_small.cnt;
1837 data[i++] = (unsigned int)ss->rx_big.cnt;
1838 data[i++] = (unsigned int)ss->tx.wake_queue;
1839 data[i++] = (unsigned int)ss->tx.stop_queue;
1840 data[i++] = (unsigned int)ss->tx.linearized;
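The ethtool statistics path (source lines 1786-1840) reads the firmware's drop counters out of the DMA-shared mcp_irq_data block. Those counters are stored in network (big-endian) byte order, so every read at source lines 1814-1827 goes through ntohl() before landing in the data array. A self-contained miniature; the struct fields are illustrative:

    #include <arpa/inet.h>
    #include <stdint.h>

    /* Firmware counters are big-endian in shared memory and must be
     * byte-swapped on little-endian hosts before being reported. */
    struct fw_stats_be { uint32_t link_up, dropped_pause, dropped_runt; };

    static unsigned int read_fw_counter(uint32_t be_counter)
    {
            return (unsigned int)ntohl(be_counter);
    }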
1928 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1930 struct myri10ge_priv *mgp = ss->mgp;
1939 slice = ss - mgp->ss;
1951 ss->tx.mask = tx_ring_entries - 1;
1952 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
1959 * sizeof(*ss->tx.req_list);
1960 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
1961 if (ss->tx.req_bytes == NULL)
1965 ss->tx.req_list = (struct mcp_kreq_ether_send *)
1966 ALIGN((unsigned long)ss->tx.req_bytes, 8);
1967 ss->tx.queue_active = 0;
1969 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
1970 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
1971 if (ss->rx_small.shadow == NULL)
1974 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
1975 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
1976 if (ss->rx_big.shadow == NULL)
1981 bytes = tx_ring_entries * sizeof(*ss->tx.info);
1982 ss->tx.info = kzalloc(bytes, GFP_KERNEL);
1983 if (ss->tx.info == NULL)
1986 bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
1987 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
1988 if (ss->rx_small.info == NULL)
1991 bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
1992 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
1993 if (ss->rx_big.info == NULL)
1997 ss->rx_big.cnt = 0;
1998 ss->rx_small.cnt = 0;
1999 ss->rx_big.fill_cnt = 0;
2000 ss->rx_small.fill_cnt = 0;
2001 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
2002 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
2003 ss->rx_small.watchdog_needed = 0;
2004 ss->rx_big.watchdog_needed = 0;
2006 ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
2008 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
2012 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
2014 slice, ss->rx_small.fill_cnt);
2018 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
2019 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
2021 slice, ss->rx_big.fill_cnt);
2028 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2029 int idx = i & ss->rx_big.mask;
2030 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2032 put_page(ss->rx_big.info[idx].page);
2037 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2038 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2039 int idx = i & ss->rx_small.mask;
2040 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2042 put_page(ss->rx_small.info[idx].page);
2045 kfree(ss->rx_big.info);
2048 kfree(ss->rx_small.info);
2051 kfree(ss->tx.info);
2054 kfree(ss->rx_big.shadow);
2057 kfree(ss->rx_small.shadow);
2060 kfree(ss->tx.req_bytes);
2061 ss->tx.req_bytes = NULL;
2062 ss->tx.req_list = NULL;
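myri10ge_allocate_rings (source lines 1928-2062) sizes every ring as a power of two so that `mask = entries - 1` (source lines 1951-1952) turns a free-running index into a slot with a single AND, and it unwinds each allocation in reverse when a later one fails (the abort paths from source line 2028 on). A self-contained sketch of both ideas, with hypothetical names:

    #include <stdlib.h>

    /* Power-of-two ring allocation with goto-style unwinding, after
     * the shape of source lines 1951-2062.  Names are illustrative. */
    struct ring { unsigned mask; void **info; void **shadow; };

    static int alloc_ring(struct ring *r, unsigned entries)
    {
            r->mask = entries - 1;       /* entries must be a power of two */
            r->info = calloc(entries, sizeof(*r->info));
            if (r->info == NULL)
                    return -1;
            r->shadow = calloc(entries, sizeof(*r->shadow));
            if (r->shadow == NULL)
                    goto abort_with_info;  /* unwind the earlier allocation */
            return 0;

    abort_with_info:
            free(r->info);
            r->info = NULL;
            return -1;
    }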
2068 static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2070 struct myri10ge_priv *mgp = ss->mgp;
2076 if (ss->tx.req_list == NULL)
2079 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2080 idx = i & ss->rx_big.mask;
2081 if (i == ss->rx_big.fill_cnt - 1)
2082 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
2083 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2085 put_page(ss->rx_big.info[idx].page);
2089 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2090 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2091 idx = i & ss->rx_small.mask;
2092 if (i == ss->rx_small.fill_cnt - 1)
2093 ss->rx_small.info[idx].page_offset =
2095 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2097 put_page(ss->rx_small.info[idx].page);
2099 tx = &ss->tx;
2110 ss->stats.tx_dropped++;
2125 kfree(ss->rx_big.info);
2127 kfree(ss->rx_small.info);
2129 kfree(ss->tx.info);
2131 kfree(ss->rx_big.shadow);
2133 kfree(ss->rx_small.shadow);
2135 kfree(ss->tx.req_bytes);
2136 ss->tx.req_bytes = NULL;
2137 ss->tx.req_list = NULL;
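myri10ge_free_rings (source lines 2068-2137) releases everything allocate_rings built, walking each ring from the consumer count to the fill count and folding the free-running index into a slot with the mask (idx = i & mask, source lines 2080 and 2091). A runnable demonstration of that fold across a wrap:

    #include <stdio.h>
    #include <stdint.h>

    /* The cleanup walk of source lines 2079-2097 in miniature: indices
     * between cnt and fill_cnt are outstanding buffers; the power-of-two
     * mask maps each onto a ring slot, even across wraparound. */
    int main(void)
    {
            const unsigned mask = 255;            /* 256-entry ring */
            uint32_t cnt = 254, fill_cnt = 258;   /* spans the wrap */

            for (uint32_t i = cnt; i < fill_cnt; i++)
                    printf("counter %u -> slot %u\n", i, i & mask);
            return 0;
    }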
2143 struct myri10ge_slice_state *ss;
2175 ss = &mgp->ss[i];
2176 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
2179 myri10ge_intr, 0, ss->irq_desc,
2180 ss);
2187 &mgp->ss[i]);
2196 mgp->dev->name, &mgp->ss[0]);
2213 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2215 free_irq(pdev->irq, &mgp->ss[0]);
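The interrupt setup at source lines 2143-2215 requests one MSI-X vector per slice, passing that slice's state as the dev_id so myri10ge_intr (source line 1540) receives its slice directly; with legacy interrupts a single handler is registered against slice 0 (source line 2196). A hedged kernel-style sketch; the irq_desc format string is assumed, not shown in the listing:

    /* Sketch of the per-slice MSI-X request loop (source lines
     * 2175-2180); on failure the vectors already claimed are freed,
     * as at source line 2213. */
    for (i = 0; i < mgp->num_slices; i++) {
            ss = &mgp->ss[i];
            snprintf(ss->irq_desc, sizeof(ss->irq_desc),
                     "%s:slice-%d", netdev->name, i);   /* format assumed */
            status = request_irq(mgp->msix_vectors[i].vector,
                                 myri10ge_intr, 0, ss->irq_desc, ss);
            if (status != 0)
                    goto abort_with_irq;
    }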
2226 struct myri10ge_slice_state *ss;
2229 ss = &mgp->ss[slice];
2235 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2241 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2246 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2249 ss->tx.send_go = (__iomem __be32 *)
2251 ss->tx.send_stop = (__iomem __be32 *)
2260 struct myri10ge_slice_state *ss;
2263 ss = &mgp->ss[slice];
2264 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2265 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2269 dma_addr_t bus = ss->fw_stats_bus;
2288 struct myri10ge_slice_state *ss;
2382 ss = &mgp->ss[slice];
2389 status = myri10ge_allocate_rings(ss);
2404 napi_enable(&(ss)->napi);
2452 napi_disable(&mgp->ss[slice].napi);
2455 myri10ge_free_rings(&mgp->ss[i]);
2474 if (mgp->ss[0].tx.req_bytes == NULL)
2480 napi_disable(&mgp->ss[i].napi);
2501 myri10ge_free_rings(&mgp->ss[i]);
2617 struct myri10ge_slice_state *ss;
2632 ss = &mgp->ss[queue];
2634 tx = &ss->tx;
2711 ss->stats.tx_dropped += 1;
2876 ss->stats.tx_dropped += 1;
2886 struct myri10ge_slice_state *ss;
2909 ss = &mgp->ss[skb_get_queue_mapping(skb)];
2911 ss->stats.tx_dropped += 1;
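On transmit (source lines 2617-2911) the slice is chosen directly from the skb's queue mapping, so each hardware queue owns a private tx ring and statistics block. A fragment restating the selection visible at source lines 2632, 2634, and 2909:

    /* Fragment, as in the listing: the queue mapping picks the slice. */
    queue = skb_get_queue_mapping(skb);
    ss = &mgp->ss[queue];
    tx = &ss->tx;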
2923 slice_stats = &mgp->ss[i].stats;
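Source line 2923 is the get_stats path folding each slice's private counters into the netdev totals. A self-contained sketch of the roll-up; field names are illustrative, not the driver's:

    #include <stdint.h>

    /* Per-slice counters summed into device-wide totals, as implied
     * by the slice_stats loop at source line 2923. */
    struct slice_stats { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes; };

    static void sum_stats(struct slice_stats *total,
                          const struct slice_stats *per_slice, int n)
    {
            for (int i = 0; i < n; i++) {
                    total->rx_packets += per_slice[i].rx_packets;
                    total->tx_packets += per_slice[i].tx_packets;
                    total->rx_bytes   += per_slice[i].rx_bytes;
                    total->tx_bytes   += per_slice[i].tx_bytes;
            }
    }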
3334 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
3337 struct myri10ge_priv *mgp = ss->mgp;
3338 int slice = ss - mgp->ss;
3340 if (ss->tx.req != ss->tx.done &&
3341 ss->tx.done == ss->watchdog_tx_done &&
3342 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3351 slice, ss->tx.queue_active, ss->tx.req,
3352 ss->tx.done, ss->tx.pkt_start,
3353 ss->tx.pkt_done,
3354 (int)ntohl(mgp->ss[slice].fw_stats->
3357 ss->stuck = 1;
3360 if (ss->watchdog_tx_done != ss->tx.done ||
3361 ss->watchdog_rx_done != ss->rx_done.cnt) {
3364 ss->watchdog_tx_done = ss->tx.done;
3365 ss->watchdog_tx_req = ss->tx.req;
3366 ss->watchdog_rx_done = ss->rx_done.cnt;
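myri10ge_check_slice (source lines 3334-3366) declares a slice suspect when transmit work is pending (tx.req != tx.done), completion has not advanced since the previous watchdog tick, and the queue was not already idle at that tick; otherwise it snapshots the counters for the next pass (source lines 3364-3366). The test at source lines 3340-3342, reduced to a self-contained predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* Direct reduction of the condition at source lines 3340-3342. */
    static bool tx_looks_stuck(uint32_t req, uint32_t done,
                               uint32_t watchdog_done, uint32_t watchdog_req)
    {
            return req != done &&                 /* work is pending        */
                   done == watchdog_done &&       /* no progress since tick */
                   watchdog_req != watchdog_done; /* was not already idle   */
    }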
3377 struct myri10ge_slice_state *ss;
3428 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3430 ss = mgp->ss;
3431 if (ss->stuck) {
3432 myri10ge_check_slice(ss, &reset_needed,
3435 ss->stuck = 0;
3468 struct myri10ge_slice_state *ss;
3475 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3480 ss = &mgp->ss[i];
3481 if (ss->rx_small.watchdog_needed) {
3482 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3485 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3487 ss->rx_small.watchdog_needed = 0;
3489 if (ss->rx_big.watchdog_needed) {
3490 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3492 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3494 ss->rx_big.watchdog_needed = 0;
3496 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
3521 struct myri10ge_slice_state *ss;
3526 if (mgp->ss == NULL)
3530 ss = &mgp->ss[i];
3531 if (ss->rx_done.entry != NULL) {
3533 sizeof(*ss->rx_done.entry);
3535 ss->rx_done.entry, ss->rx_done.bus);
3536 ss->rx_done.entry = NULL;
3538 if (ss->fw_stats != NULL) {
3539 bytes = sizeof(*ss->fw_stats);
3541 ss->fw_stats, ss->fw_stats_bus);
3542 ss->fw_stats = NULL;
3544 __netif_napi_del(&ss->napi);
3546 /* Wait till napi structs are no longer used, and then free ss. */
3548 kfree(mgp->ss);
3549 mgp->ss = NULL;
3554 struct myri10ge_slice_state *ss;
3559 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3560 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3561 if (mgp->ss == NULL) {
3566 ss = &mgp->ss[i];
3567 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3568 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3569 &ss->rx_done.bus,
3571 if (ss->rx_done.entry == NULL)
3573 bytes = sizeof(*ss->fw_stats);
3574 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3575 &ss->fw_stats_bus,
3577 if (ss->fw_stats == NULL)
3579 ss->mgp = mgp;
3580 ss->dev = mgp->dev;
3581 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
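myri10ge_alloc_slices (source lines 3554-3581) gives each slice coherent DMA memory for its completion ring and firmware stats block, then registers its NAPI context; myri10ge_free_slices (source lines 3521-3549) is the exact mirror, so a mid-loop failure can simply fall through to it. A hedged fragment restating the allocation step; the `abort` label is a stand-in for the driver's actual unwind label:

    /* Fragment after source lines 3566-3577. */
    bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
    ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
                                           &ss->rx_done.bus, GFP_KERNEL);
    if (ss->rx_done.entry == NULL)
            goto abort;

    bytes = sizeof(*ss->fw_stats);
    ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
                                      &ss->fw_stats_bus, GFP_KERNEL);
    if (ss->fw_stats == NULL)
            goto abort;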