Lines Matching defs:ring
44 * RX DATA: the rx completion ring has all the info, but the rx desc
45 * ring has all of the data. RX can conceivably come in under multiple
270 static void cas_disable_irq(struct cas *cp, const int ring)
273 if (ring == 0) {
280 switch (ring) {
292 cp->regs + REG_PLUS_INTRN_MASK(ring));
297 REG_PLUS_INTRN_MASK(ring));
311 static void cas_enable_irq(struct cas *cp, const int ring)
313 if (ring == 0) { /* all but TX_DONE */
319 switch (ring) {
331 REG_PLUS_INTRN_MASK(ring));
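The cas_disable_irq / cas_enable_irq fragments above show the per-ring interrupt masking pattern: ring 0 is controlled through the chip's global interrupt mask, while the other completion rings each get their own REG_PLUS_INTRN_MASK(ring) register. Below is a minimal user-space sketch of that branching; the register offsets, mask values and demo_* names are made-up stand-ins for illustration, not the driver's real register map.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register map and mask values -- stand-ins, not the real chip's. */
#define N_RINGS                4
#define REG_INTR_MASK          0x00
#define REG_PLUS_INTRN_MASK(r) (0x10 + 4 * (r))   /* one mask register per ring 1..3 */
#define INTR_MASK_ALL          0xFFFFFFFFu
#define INTRN_MASK_CLEAR_ALL   0x0000FFFFu

static uint32_t regs[64];                          /* fake MMIO window for the demo */

static void writel_demo(uint32_t val, unsigned int off)
{
    regs[off / 4] = val;
}

/* Ring 0 is masked through the global register; rings 1..3 each use their own. */
static void demo_disable_irq(int ring)
{
    if (ring == 0) {
        writel_demo(INTR_MASK_ALL, REG_INTR_MASK);
        return;
    }
    writel_demo(INTRN_MASK_CLEAR_ALL, REG_PLUS_INTRN_MASK(ring));
}

int main(void)
{
    for (int ring = 0; ring < N_RINGS; ring++)
        demo_disable_irq(ring);
    printf("global mask = %#x, ring 1 mask = %#x\n",
           (unsigned int)regs[REG_INTR_MASK / 4],
           (unsigned int)regs[REG_PLUS_INTRN_MASK(1) / 4]);
    return 0;
}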
1343 /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1364 /* this needs to be changed if we actually use the ENC RX DESC ring */
1365 static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1385 /* only clean ring 0 as ring 1 is used for spare buffers */
1823 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1830 spin_lock(&cp->tx_lock[ring]);
1831 txds = cp->init_txds[ring];
1832 skbs = cp->tx_skbs[ring];
1833 entry = cp->tx_old[ring];
1835 count = TX_BUFF_COUNT(ring, entry, limit);
1844 entry = TX_DESC_NEXT(ring, entry);
1850 + cp->tx_tiny_use[ring][entry].nbufs + 1;
1855 "tx[%d] done, slot %d\n", ring, entry);
1858 cp->tx_tiny_use[ring][entry].nbufs = 0;
1868 entry = TX_DESC_NEXT(ring, entry);
1871 if (cp->tx_tiny_use[ring][entry].used) {
1872 cp->tx_tiny_use[ring][entry].used = 0;
1873 entry = TX_DESC_NEXT(ring, entry);
1877 spin_lock(&cp->stat_lock[ring]);
1878 cp->net_stats[ring].tx_packets++;
1879 cp->net_stats[ring].tx_bytes += skb->len;
1880 spin_unlock(&cp->stat_lock[ring]);
1883 cp->tx_old[ring] = entry;
1890 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1892 spin_unlock(&cp->tx_lock[ring]);
1898 int limit, ring;
1906 for (ring = 0; ring < N_TX_RINGS; ring++) {
1913 limit = readl(cp->regs + REG_TX_COMPN(ring));
1915 if (cp->tx_old[ring] != limit)
1916 cas_tx_ringN(cp, ring, limit);
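cas_tx_ringN (1823-1892 above) is the TX reclaim path: under the per-ring tx_lock it walks tx_old forward toward the completion index the hardware reported in REG_TX_COMPN(ring), frees each transmitted skb, and bumps the per-ring stats; cas_tx then runs it for every ring whose tx_old lags that index. The sketch below keeps only the core walk-and-free loop, with hypothetical demo_* types in place of the driver's structures; it leaves out DMA unmapping, the tiny-buffer bookkeeping and the queue-wake logic.

#include <stdio.h>
#include <stdlib.h>

#define TX_RING_SIZE 8                    /* hypothetical; real rings are larger */
#define TX_DESC_NEXT(e) (((e) + 1) & (TX_RING_SIZE - 1))

struct demo_tx_ring {
    void *skbs[TX_RING_SIZE];             /* stand-in for cp->tx_skbs[ring] */
    int   tx_old;                         /* first entry not yet reclaimed */
    long  tx_packets;
};

/* Reclaim every entry from tx_old up to (but not including) 'limit',
 * the completion index read back from the hardware. */
static void demo_tx_reclaim(struct demo_tx_ring *r, int limit)
{
    int entry = r->tx_old;

    while (entry != limit) {
        void *skb = r->skbs[entry];
        if (skb) {
            free(skb);                    /* the driver unmaps DMA and frees the skb */
            r->skbs[entry] = NULL;
            r->tx_packets++;
        }
        entry = TX_DESC_NEXT(entry);
    }
    r->tx_old = entry;                    /* remember how far reclaim got */
}

int main(void)
{
    struct demo_tx_ring r = { .tx_old = 0 };
    for (int i = 0; i < 5; i++)
        r.skbs[i] = malloc(64);           /* pretend 5 packets were queued */

    demo_tx_reclaim(&r, 5);               /* hardware says 5 entries completed */
    printf("reclaimed %ld packets, tx_old = %d\n", r.tx_packets, r.tx_old);
    return 0;
}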
2162 /* put rx descriptor back on ring. if a buffer is in use by a higher
2165 static void cas_post_page(struct cas *cp, const int ring, const int index)
2170 entry = cp->rx_old[ring];
2172 new = cas_page_swap(cp, ring, index);
2173 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2174 cp->init_rxds[ring][entry].index =
2176 CAS_BASE(RX_INDEX_RING, ring));
2178 entry = RX_DESC_ENTRY(ring, entry + 1);
2179 cp->rx_old[ring] = entry;
2184 if (ring == 0)
2193 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2197 cas_page_t **page = cp->rx_pages[ring];
2199 entry = cp->rx_old[ring];
2202 "rxd[%d] interrupt, done: %d\n", ring, entry);
2206 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2216 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2220 cp->rx_old[ring] = entry;
2221 cp->rx_last[ring] = num ? num - released : 0;
2227 cp->init_rxds[ring][entry].buffer =
2238 entry = RX_DESC_ENTRY(ring, entry + 1);
2240 cp->rx_old[ring] = entry;
2245 if (ring == 0)
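cas_post_page and cas_post_rxds_ringN (2162-2245 above) put buffers back on the RX descriptor ring: each reclaimed slot gets a fresh page's DMA address plus an index word, rx_old advances, and the hardware is kicked in batches rather than per descriptor. A simplified refill-and-kick sketch follows; the ring size, batch size and demo_* structures are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE     16                    /* hypothetical ring size */
#define RX_DESC_ENTRY(e) ((e) & (RX_RING_SIZE - 1))
#define RX_REFILL_BATCH  4                     /* tell the NIC every 4 descriptors */

struct demo_rxd { uint64_t buffer; uint64_t index; };

struct demo_rx_ring {
    struct demo_rxd desc[RX_RING_SIZE];
    int rx_old;                                /* next slot to refill */
    int kick;                                  /* last index written to the kick reg */
};

/* Post 'num' fresh buffers starting at rx_old, advancing the index and
 * "kicking" the hardware in batches, mimicking the rx_old bookkeeping above. */
static void demo_post_rxds(struct demo_rx_ring *r, int num,
                           const uint64_t *dma_addrs)
{
    int entry = r->rx_old;

    for (int i = 0; i < num; i++) {
        r->desc[entry].buffer = dma_addrs[i];  /* cpu_to_le64() in the driver */
        r->desc[entry].index  = (uint64_t)i;   /* driver also encodes the ring number */
        entry = RX_DESC_ENTRY(entry + 1);

        if ((i + 1) % RX_REFILL_BATCH == 0)
            r->kick = entry;                   /* writel(entry, ...) in the driver */
    }
    r->rx_old = entry;
}

int main(void)
{
    struct demo_rx_ring r = { .rx_old = 0 };
    uint64_t addrs[8];
    for (int i = 0; i < 8; i++)
        addrs[i] = 0x1000u * (i + 1);          /* fake DMA addresses */

    demo_post_rxds(&r, 8, addrs);
    printf("rx_old = %d, last kick = %d\n", r.rx_old, r.kick);
    return 0;
}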
2254 /* process a completion ring. packets are set up in three basic ways:
2264 * force serialization on the single descriptor ring.
2266 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2268 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2274 ring,
2275 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2277 entry = cp->rx_new[ring];
2303 spin_lock(&cp->stat_lock[ring]);
2304 cp->net_stats[ring].rx_errors++;
2306 cp->net_stats[ring].rx_length_errors++;
2308 cp->net_stats[ring].rx_crc_errors++;
2309 spin_unlock(&cp->stat_lock[ring]);
2313 spin_lock(&cp->stat_lock[ring]);
2314 ++cp->net_stats[ring].rx_dropped;
2315 spin_unlock(&cp->stat_lock[ring]);
2335 spin_lock(&cp->stat_lock[ring]);
2336 cp->net_stats[ring].rx_packets++;
2337 cp->net_stats[ring].rx_bytes += len;
2338 spin_unlock(&cp->stat_lock[ring]);
2366 entry = RX_COMP_ENTRY(ring, entry + 1 +
2373 cp->rx_new[ring] = entry;
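cas_rx_ringN (2266-2373 above) drains a completion ring: starting at rx_new it walks completion entries, accounts errors, drops and good packets into net_stats[ring] under stat_lock, and stops once it catches up with the hardware or runs out of budget. The sketch below keeps only that budget-bounded accounting loop; the completion-entry layout and demo_* names are invented for the example, and no skb is built or passed up.

#include <stdio.h>

#define RX_COMP_RING_SIZE 16
#define RX_COMP_ENTRY(e) ((e) & (RX_COMP_RING_SIZE - 1))

struct demo_comp { int valid; int error; int len; };

struct demo_comp_ring {
    struct demo_comp comp[RX_COMP_RING_SIZE];
    int rx_new;                      /* next completion to look at */
    long rx_packets, rx_bytes, rx_errors;
};

/* Process up to 'budget' good packets; return how many were handled,
 * leaving rx_new pointing at the first unprocessed entry. */
static int demo_rx_poll(struct demo_comp_ring *r, int budget)
{
    int entry = r->rx_new;
    int npackets = 0;

    while (r->comp[entry].valid && npackets < budget) {
        struct demo_comp *c = &r->comp[entry];

        if (c->error) {
            r->rx_errors++;          /* driver splits this into crc/length errors */
        } else {
            r->rx_packets++;
            r->rx_bytes += c->len;   /* driver builds and delivers an skb here */
            npackets++;
        }
        c->valid = 0;
        entry = RX_COMP_ENTRY(entry + 1);
    }
    r->rx_new = entry;
    return npackets;
}

int main(void)
{
    struct demo_comp_ring r = { .rx_new = 0 };
    for (int i = 0; i < 6; i++)
        r.comp[i] = (struct demo_comp){ .valid = 1, .error = (i == 3), .len = 1500 };

    int done = demo_rx_poll(&r, 8);
    printf("handled %d packets, %ld errors, rx_new = %d\n",
           done, r.rx_errors, r.rx_new);
    return 0;
}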
2381 /* put completion entries back on the ring */
2383 struct cas *cp, int ring)
2385 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2388 last = cp->rx_cur[ring];
2389 entry = cp->rx_new[ring];
2392 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2397 last = RX_COMP_ENTRY(ring, last + 1);
2399 cp->rx_cur[ring] = last;
2401 if (ring == 0)
2404 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2409 /* cassini can use all four PCI interrupts for the completion ring.
2415 const int ring)
2418 cas_post_rxcs_ringN(dev, cp, ring);
2426 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2427 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2439 cas_rx_ringN(cp, ring, 0);
2445 cas_handle_irqN(dev, cp, status, ring);
2472 /* ring 2 handles a few more events than 3 and 4 */
2575 * ring N_RX_COMP_RING times with a request of
2703 static inline int cas_intme(int ring, int entry)
2706 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2712 static void cas_write_txd(struct cas *cp, int ring, int entry,
2715 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2718 if (cas_intme(ring, entry))
2726 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2729 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2732 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2735 cp->tx_tiny_use[ring][tentry].nbufs++;
2736 cp->tx_tiny_use[ring][entry].used = 1;
2737 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
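cas_intme (2703-2706 above) is a small interrupt-mitigation rule: a descriptor requests a TX-done interrupt only when its slot index lands on a half-ring boundary, so each pass around the ring raises at most two interrupts; tx_tiny_buf/tx_tiny_map then hand out per-slot bounce buffers from a per-ring DMA block. The sketch below demonstrates just the boundary test, with a hypothetical ring size.

#include <stdio.h>
#include <stdbool.h>

#define TX_RING_SIZE 64      /* hypothetical; must be a power of two */

/* Request an interrupt only when 'entry' sits on a half-ring boundary,
 * i.e. at slot 0 and slot TX_RING_SIZE/2: two interrupts per ring cycle. */
static bool demo_intme(int entry)
{
    return (entry & ((TX_RING_SIZE >> 1) - 1)) == 0;
}

int main(void)
{
    int irqs = 0;
    for (int entry = 0; entry < TX_RING_SIZE; entry++)
        if (demo_intme(entry)) {
            printf("interrupt requested at slot %d\n", entry);
            irqs++;
        }
    printf("%d interrupts per %d descriptors\n", irqs, TX_RING_SIZE);
    return 0;
}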
2740 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2750 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2753 if (TX_BUFFS_AVAIL(cp, ring) <=
2756 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2771 entry = cp->tx_new[ring];
2772 cp->tx_skbs[ring][entry] = skb;
2783 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2785 entry = TX_DESC_NEXT(ring, entry);
2788 tx_tiny_buf(cp, ring, entry), tabort);
2789 mapping = tx_tiny_map(cp, ring, entry, tentry);
2790 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2793 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2796 entry = TX_DESC_NEXT(ring, entry);
2810 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2812 entry = TX_DESC_NEXT(ring, entry);
2815 memcpy(tx_tiny_buf(cp, ring, entry),
2819 mapping = tx_tiny_map(cp, ring, entry, tentry);
2823 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2825 entry = TX_DESC_NEXT(ring, entry);
2828 cp->tx_new[ring] = entry;
2829 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2834 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2835 writel(entry, cp->regs + REG_TX_KICKN(ring));
2836 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2847 static int ring;
2855 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
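In cas_xmit (2847-2855 above) a static counter spreads packets across the TX rings: each transmission uses ring++ & N_TX_RINGS_MASK, so consecutive packets land on consecutive rings and the mask wraps the counter back to 0. A tiny sketch of that round-robin pick, assuming a power-of-two ring count and a made-up demo_pick_ring helper:

#include <stdio.h>

#define N_TX_RINGS      4                      /* hypothetical; must be a power of two */
#define N_TX_RINGS_MASK (N_TX_RINGS - 1)

/* Pick the next TX ring round-robin the way the static counter in the
 * transmit path does: post-increment, then mask down to a valid ring index. */
static int demo_pick_ring(void)
{
    static unsigned int ring;
    return (int)(ring++ & N_TX_RINGS_MASK);
}

int main(void)
{
    for (int pkt = 0; pkt < 10; pkt++)
        printf("packet %d -> tx ring %d\n", pkt, demo_pick_ring());
    return 0;
}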
2882 /* write out tx ring info and tx desc bases */
3865 static void cas_clean_txd(struct cas *cp, int ring)
3867 struct cas_tx_desc *txd = cp->init_txds[ring];
3868 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3872 size = TX_DESC_RINGN_SIZE(ring);
3901 if (cp->tx_tiny_use[ring][ent].used)
3909 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3913 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3915 cas_page_t **page = cp->rx_pages[ring];
3918 size = RX_DESC_RINGN_SIZE(ring);
3953 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3955 cas_page_t **page = cp->rx_pages[ring];
3958 size = RX_DESC_RINGN_SIZE(ring);
3997 /* The link went down, we reset the ring, but keep
4428 /* saved bits that are unique to ring 0 */