Lines matching references to bp in the b44 Ethernet driver (b44.c)

164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
166 return ssb_read32(bp->sdev, reg);
169 static inline void bw32(const struct b44 *bp,
172 ssb_write32(bp->sdev, reg, val);
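
The two accessors above funnel all MMIO through the SSB core. A minimal sketch of the read-modify-write idiom the rest of the listing builds on; B44_EXAMPLE_REG and EXAMPLE_FLAG are hypothetical placeholders, not names from the real driver:

    /* Sketch only: read-modify-write through br32()/bw32(). */
    static void b44_set_example_flag(struct b44 *bp)
    {
        u32 val = br32(bp, B44_EXAMPLE_REG);    /* ssb_read32() underneath */

        bw32(bp, B44_EXAMPLE_REG, val | EXAMPLE_FLAG);  /* ssb_write32() */
    }
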
175 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
181 u32 val = br32(bp, reg);
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
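
b44_wait_bit() is the polling primitive used throughout this listing. A hedged reconstruction of the loop those fragments imply; the 10us poll interval and the -ENODEV return are assumptions:

    /* Spin until `bit` in `reg` reaches the wanted state (set or
     * cleared per `clear`) or `timeout` iterations elapse, then fail
     * with the netdev_err() message seen above. */
    static int b44_wait_bit_sketch(struct b44 *bp, unsigned long reg,
                                   u32 bit, unsigned long timeout,
                                   const int clear)
    {
        unsigned long i;

        for (i = 0; i < timeout; i++) {
            u32 val = br32(bp, reg);

            if (clear && !(val & bit))
                break;
            if (!clear && (val & bit))
                break;
            udelay(10);
        }
        return (i == timeout) ? -ENODEV : 0;
    }
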
199 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
203 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
206 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
208 val = br32(bp, B44_CAM_DATA_LO);
215 val = br32(bp, B44_CAM_DATA_HI);
221 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
229 bw32(bp, B44_CAM_DATA_LO, val);
233 bw32(bp, B44_CAM_DATA_HI, val);
234 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
236 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
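
The CAM write fragments split a 6-byte MAC across the LO/HI data registers before issuing CAM_CTRL_WRITE and polling CAM_CTRL_BUSY. The byte layout below is inferred from the same split visible later in b44_setup_wol() (B44_ADDR_LO/B44_ADDR_HI); any validity bit the real driver ORs into the high word is omitted:

    /* Inferred packing: bytes 2-5 in the low word, bytes 0-1 high. */
    u32 lo = ((u32)data[2] << 24) | ((u32)data[3] << 16) |
             ((u32)data[4] << 8)  |  (u32)data[5];
    u32 hi = ((u32)data[0] << 8)  |  (u32)data[1];

    bw32(bp, B44_CAM_DATA_LO, lo);
    bw32(bp, B44_CAM_DATA_HI, hi);
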
239 static inline void __b44_disable_ints(struct b44 *bp)
241 bw32(bp, B44_IMASK, 0);
244 static void b44_disable_ints(struct b44 *bp)
246 __b44_disable_ints(bp);
249 br32(bp, B44_IMASK);
252 static void b44_enable_ints(struct b44 *bp)
254 bw32(bp, B44_IMASK, bp->imask);
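
Note the asymmetry above: b44_disable_ints() follows the mask write with a read of the same register. A sketch of why, assuming a posted PCI write path:

    bw32(bp, B44_IMASK, 0);  /* mask everything ... */
    br32(bp, B44_IMASK);     /* ... and read back, so the write cannot sit
                              * in a posting buffer while the caller
                              * already assumes interrupts are off */
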
257 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
261 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
262 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
267 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
268 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
273 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
275 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
276 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
282 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
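
Both PHY primitives drive the same MDIO state machine: acknowledge any stale MII-done bit, launch a frame, poll EMAC_ISTAT for completion. The read path, reassembled from the fragments above (the OP/PMD/RA/TA shift macros are taken to be the driver's own header constants); the write path differs only in the opcode and in carrying `val` in the data field:

    bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);      /* ack stale done bit */
    bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
    err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
    *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
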
285 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
287 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
290 return __b44_readphy(bp, bp->phy_addr, reg, val);
293 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
295 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
298 return __b44_writephy(bp, bp->phy_addr, reg, val);
305 struct b44 *bp = netdev_priv(dev);
306 int rc = __b44_readphy(bp, phy_id, location, &val);
315 struct b44 *bp = netdev_priv(dev);
316 __b44_writephy(bp, phy_id, location, val);
322 struct b44 *bp = bus->priv;
323 int rc = __b44_readphy(bp, phy_id, location, &val);
332 struct b44 *bp = bus->priv;
333 return __b44_writephy(bp, phy_id, location, val);
336 static int b44_phy_reset(struct b44 *bp)
341 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
343 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
347 err = b44_readphy(bp, MII_BMCR, &val);
350 netdev_err(bp->dev, "PHY Reset would not complete\n");
358 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
362 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
363 bp->flags |= pause_flags;
365 val = br32(bp, B44_RXCONFIG);
370 bw32(bp, B44_RXCONFIG, val);
372 val = br32(bp, B44_MAC_FLOW);
378 bw32(bp, B44_MAC_FLOW, val);
381 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
397 __b44_set_flow_ctrl(bp, pause_enab);
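
b44_set_flow_ctrl() turns the local and partner autoneg words into the TX/RX pause flags consumed above. A sketch of the standard 802.3 resolution (cf. mii_resolve_flowctrl_fdx() in the kernel's MII helpers); the b44-specific policy may simplify this:

    u32 pause_enab = 0;

    if ((local & ADVERTISE_PAUSE_CAP) && (remote & LPA_PAUSE_CAP)) {
        pause_enab |= B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE;
    } else if ((local & ADVERTISE_PAUSE_ASYM) &&
               (remote & LPA_PAUSE_ASYM)) {
        if (local & ADVERTISE_PAUSE_CAP)
            pause_enab |= B44_FLAG_RX_PAUSE;
        else if (remote & LPA_PAUSE_CAP)
            pause_enab |= B44_FLAG_TX_PAUSE;
    }
    __b44_set_flow_ctrl(bp, pause_enab);
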
402 static void b44_wap54g10_workaround(struct b44 *bp)
416 err = __b44_readphy(bp, 0, MII_BMCR, &val);
422 err = __b44_writephy(bp, 0, MII_BMCR, val);
431 static inline void b44_wap54g10_workaround(struct b44 *bp)
436 static int b44_setup_phy(struct b44 *bp)
441 b44_wap54g10_workaround(bp);
443 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
445 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
447 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
450 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
456 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
459 if (bp->flags & B44_FLAG_ADV_10HALF)
461 if (bp->flags & B44_FLAG_ADV_10FULL)
463 if (bp->flags & B44_FLAG_ADV_100HALF)
465 if (bp->flags & B44_FLAG_ADV_100FULL)
468 if (bp->flags & B44_FLAG_PAUSE_AUTO)
471 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
473 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
479 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
482 if (bp->flags & B44_FLAG_100_BASE_T)
484 if (bp->flags & B44_FLAG_FULL_DUPLEX)
486 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
493 b44_set_flow_ctrl(bp, 0, 0);
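
The autoneg branch of b44_setup_phy() maps each B44_FLAG_ADV_* software flag onto its ADVERTISE_* MII bit before restarting negotiation. Reassembled from the fragments, error handling abridged:

    u32 adv = ADVERTISE_CSMA;
    int err;

    if (bp->flags & B44_FLAG_ADV_10HALF)
        adv |= ADVERTISE_10HALF;
    if (bp->flags & B44_FLAG_ADV_10FULL)
        adv |= ADVERTISE_10FULL;
    if (bp->flags & B44_FLAG_ADV_100HALF)
        adv |= ADVERTISE_100HALF;
    if (bp->flags & B44_FLAG_ADV_100FULL)
        adv |= ADVERTISE_100FULL;
    if (bp->flags & B44_FLAG_PAUSE_AUTO)
        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

    err = b44_writephy(bp, MII_ADVERTISE, adv);
    if (!err)
        err = b44_writephy(bp, MII_BMCR,
                           BMCR_ANENABLE | BMCR_ANRESTART);
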
500 static void b44_stats_update(struct b44 *bp)
505 val = &bp->hw_stats.tx_good_octets;
506 u64_stats_update_begin(&bp->hw_stats.syncp);
509 *val++ += br32(bp, reg);
513 *val++ += br32(bp, reg);
516 u64_stats_update_end(&bp->hw_stats.syncp);
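
The stats loop works because the MIB block is put into clear-on-read mode (MIB_CTRL_CLR_ON_READ, see b44_chip_reset() further down): each br32() both samples and zeroes a hardware counter, so the u64 soft counters simply accumulate under the u64_stats seqcount. A sketch, with the register range as an assumption:

    unsigned long reg;
    u64 *val = &bp->hw_stats.tx_good_octets;

    u64_stats_update_begin(&bp->hw_stats.syncp);
    for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
        *val++ += br32(bp, reg);    /* read clears the MIB register */
    u64_stats_update_end(&bp->hw_stats.syncp);
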
519 static void b44_link_report(struct b44 *bp)
521 if (!netif_carrier_ok(bp->dev)) {
522 netdev_info(bp->dev, "Link is down\n");
524 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
525 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
526 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
528 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
529 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
530 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
534 static void b44_check_phy(struct b44 *bp)
538 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
539 bp->flags |= B44_FLAG_100_BASE_T;
540 if (!netif_carrier_ok(bp->dev)) {
541 u32 val = br32(bp, B44_TX_CTRL);
542 if (bp->flags & B44_FLAG_FULL_DUPLEX)
546 bw32(bp, B44_TX_CTRL, val);
547 netif_carrier_on(bp->dev);
548 b44_link_report(bp);
553 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
554 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
557 bp->flags |= B44_FLAG_100_BASE_T;
559 bp->flags &= ~B44_FLAG_100_BASE_T;
561 bp->flags |= B44_FLAG_FULL_DUPLEX;
563 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
565 if (!netif_carrier_ok(bp->dev) &&
567 u32 val = br32(bp, B44_TX_CTRL);
570 if (bp->flags & B44_FLAG_FULL_DUPLEX)
574 bw32(bp, B44_TX_CTRL, val);
576 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
577 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
578 !b44_readphy(bp, MII_LPA, &remote_adv))
579 b44_set_flow_ctrl(bp, local_adv, remote_adv);
582 netif_carrier_on(bp->dev);
583 b44_link_report(bp);
584 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
586 netif_carrier_off(bp->dev);
587 b44_link_report(bp);
591 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
593 netdev_warn(bp->dev, "Jabber detected in PHY\n");
599 struct b44 *bp = from_timer(bp, t, timer);
601 spin_lock_irq(&bp->lock);
603 b44_check_phy(bp);
605 b44_stats_update(bp);
607 spin_unlock_irq(&bp->lock);
609 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
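
The fragments of b44_timer() assemble into the classic once-a-second housekeeping pattern; round_jiffies() aligns the next expiry so the wakeup can batch with others:

    static void b44_timer_sketch(struct timer_list *t)
    {
        struct b44 *bp = from_timer(bp, t, timer);

        spin_lock_irq(&bp->lock);
        b44_check_phy(bp);       /* link state, flow control */
        b44_stats_update(bp);    /* harvest clear-on-read MIBs */
        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
    }
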
612 static void b44_tx(struct b44 *bp)
617 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
621 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
622 struct ring_info *rp = &bp->tx_buffers[cons];
627 dma_unmap_single(bp->sdev->dma_dev,
639 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
640 bp->tx_cons = cons;
641 if (netif_queue_stopped(bp->dev) &&
642 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
643 netif_wake_queue(bp->dev);
645 bw32(bp, B44_GPTIMER, 0);
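
b44_tx() reclaims every descriptor between the driver's consumer index and the hardware's, reported in DMATX_STAT. The core of the loop as a sketch; the byte-to-slot conversion and the skb-free call are assumptions:

    u32 cur, cons;

    cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
    cur /= sizeof(struct dma_desc);    /* assumed: byte offset -> slot */

    for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
        struct ring_info *rp = &bp->tx_buffers[cons];

        dma_unmap_single(bp->sdev->dma_dev, rp->mapping,
                         rp->skb->len, DMA_TO_DEVICE);
        dev_consume_skb_irq(rp->skb);  /* assumed irq-safe free */
        rp->skb = NULL;
    }
    bp->tx_cons = cons;
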
653 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
665 src_map = &bp->rx_buffers[src_idx];
667 map = &bp->rx_buffers[dest_idx];
668 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
672 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
678 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
681 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
682 dma_unmap_single(bp->sdev->dma_dev, mapping,
688 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
691 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
693 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
694 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
698 bp->force_copybreak = 1;
716 dp = &bp->rx_ring[dest_idx];
718 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
720 if (bp->flags & B44_FLAG_RX_RING_HACK)
721 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
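
Both mapping attempts in b44_alloc_rx_skb() are followed by the same test, because this core can only DMA within a 30-bit address window. The recurring idiom, sketched:

    /* A mapping that failed, or that succeeded but landed above 1GB,
     * is backed out; the buffer is then retried from GFP_DMA memory. */
    mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
    if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
        mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
            dma_unmap_single(bp->sdev->dma_dev, mapping,
                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
        /* retry path: allocate GFP_ATOMIC | GFP_DMA and map again */
    }
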
728 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
737 dest_desc = &bp->rx_ring[dest_idx];
738 dest_map = &bp->rx_buffers[dest_idx];
739 src_desc = &bp->rx_ring[src_idx];
740 src_map = &bp->rx_buffers[src_idx];
748 if (bp->flags & B44_FLAG_RX_RING_HACK)
749 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
764 if (bp->flags & B44_FLAG_RX_RING_HACK)
765 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
769 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
774 static int b44_rx(struct b44 *bp, int budget)
780 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
782 cons = bp->rx_cons;
785 struct ring_info *rp = &bp->rx_buffers[cons];
791 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
799 b44_recycle_rx(bp, cons, bp->rx_prod);
801 bp->dev->stats.rx_dropped++;
820 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
822 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
825 dma_unmap_single(bp->sdev->dma_dev, map,
833 b44_recycle_rx(bp, cons, bp->rx_prod);
834 copy_skb = napi_alloc_skb(&bp->napi, len);
845 skb->protocol = eth_type_trans(skb, bp->dev);
850 bp->rx_prod = (bp->rx_prod + 1) &
855 bp->rx_cons = cons;
856 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
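
The RX path makes a copybreak decision per frame: large frames surrender their DMA buffer (a replacement is allocated for the ring slot), small frames are copied so the mapped buffer can be recycled in place. Simplified from the fragments, error paths omitted:

    if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
        /* pass the mapped buffer up; refill this ring slot */
        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
        dma_unmap_single(bp->sdev->dma_dev, map,
                         skb_size, DMA_FROM_DEVICE);
    } else {
        /* keep the buffer on the ring; hand up a copy instead */
        b44_recycle_rx(bp, cons, bp->rx_prod);
        copy_skb = napi_alloc_skb(&bp->napi, len);
    }
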
863 struct b44 *bp = container_of(napi, struct b44, napi);
867 spin_lock_irqsave(&bp->lock, flags);
869 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
870 /* spin_lock(&bp->tx_lock); */
871 b44_tx(bp);
872 /* spin_unlock(&bp->tx_lock); */
874 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
875 bp->istat &= ~ISTAT_RFO;
876 b44_disable_ints(bp);
877 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
878 b44_init_rings(bp);
879 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
880 netif_wake_queue(bp->dev);
883 spin_unlock_irqrestore(&bp->lock, flags);
886 if (bp->istat & ISTAT_RX)
887 work_done += b44_rx(bp, budget);
889 if (bp->istat & ISTAT_ERRORS) {
890 spin_lock_irqsave(&bp->lock, flags);
891 b44_halt(bp);
892 b44_init_rings(bp);
893 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
894 netif_wake_queue(bp->dev);
895 spin_unlock_irqrestore(&bp->lock, flags);
901 b44_enable_ints(bp);
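
The tail of b44_poll() restores the interrupt mask only once polling has genuinely finished. A sketch of the assumed completion step (napi_complete_done() on current kernels; older ones spell it napi_complete()):

    if (work_done < budget) {
        napi_complete_done(napi, work_done);
        b44_enable_ints(bp);    /* the enable-ints fragment above */
    }
    return work_done;
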
910 struct b44 *bp = netdev_priv(dev);
914 spin_lock(&bp->lock);
916 istat = br32(bp, B44_ISTAT);
917 imask = br32(bp, B44_IMASK);
932 if (napi_schedule_prep(&bp->napi)) {
936 bp->istat = istat;
937 __b44_disable_ints(bp);
938 __napi_schedule(&bp->napi);
942 bw32(bp, B44_ISTAT, istat);
943 br32(bp, B44_ISTAT);
945 spin_unlock(&bp->lock);
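
b44_interrupt() gates the raw status against the current mask (the IRQ line may be shared), hands the work to NAPI with interrupts masked, then acks. Reassembled, with the mask step marked as an assumption:

    u32 istat, imask;

    istat = br32(bp, B44_ISTAT);
    imask = br32(bp, B44_IMASK);
    istat &= imask;                 /* assumed: drop events we did not unmask */

    if (istat && napi_schedule_prep(&bp->napi)) {
        bp->istat = istat;          /* remember work for b44_poll() */
        __b44_disable_ints(bp);
        __napi_schedule(&bp->napi);
    }

    bw32(bp, B44_ISTAT, istat);     /* ack ... */
    br32(bp, B44_ISTAT);            /* ... and flush the posted write */
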
951 struct b44 *bp = netdev_priv(dev);
955 spin_lock_irq(&bp->lock);
957 b44_halt(bp);
958 b44_init_rings(bp);
959 b44_init_hw(bp, B44_FULL_RESET);
961 spin_unlock_irq(&bp->lock);
963 b44_enable_ints(bp);
970 struct b44 *bp = netdev_priv(dev);
977 spin_lock_irqsave(&bp->lock, flags);
980 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
986 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
987 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
991 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
992 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
999 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
1001 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
1002 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
1003 dma_unmap_single(bp->sdev->dma_dev, mapping,
1014 entry = bp->tx_prod;
1015 bp->tx_buffers[entry].skb = skb;
1016 bp->tx_buffers[entry].mapping = mapping;
1023 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1024 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1026 if (bp->flags & B44_FLAG_TX_RING_HACK)
1027 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1028 entry * sizeof(bp->tx_ring[0]),
1033 bp->tx_prod = entry;
1037 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1038 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1039 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1040 if (bp->flags & B44_FLAG_REORDER_BUG)
1041 br32(bp, B44_DMATX_PTR);
1045 if (TX_BUFFS_AVAIL(bp) < 1)
1049 spin_unlock_irqrestore(&bp->lock, flags);
1060 struct b44 *bp = netdev_priv(dev);
1070 spin_lock_irq(&bp->lock);
1071 b44_halt(bp);
1073 b44_init_rings(bp);
1074 b44_init_hw(bp, B44_FULL_RESET);
1075 spin_unlock_irq(&bp->lock);
1077 b44_enable_ints(bp);
1086 * end up in the driver. bp->lock is not held and we are not
1089 static void b44_free_rings(struct b44 *bp)
1095 rp = &bp->rx_buffers[i];
1099 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1107 rp = &bp->tx_buffers[i];
1111 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1124 static void b44_init_rings(struct b44 *bp)
1128 b44_free_rings(bp);
1130 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1131 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1133 if (bp->flags & B44_FLAG_RX_RING_HACK)
1134 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1137 if (bp->flags & B44_FLAG_TX_RING_HACK)
1138 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1141 for (i = 0; i < bp->rx_pending; i++) {
1142 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1151 static void b44_free_consistent(struct b44 *bp)
1153 kfree(bp->rx_buffers);
1154 bp->rx_buffers = NULL;
1155 kfree(bp->tx_buffers);
1156 bp->tx_buffers = NULL;
1157 if (bp->rx_ring) {
1158 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1159 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1161 kfree(bp->rx_ring);
1163 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1164 bp->rx_ring, bp->rx_ring_dma);
1165 bp->rx_ring = NULL;
1166 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1168 if (bp->tx_ring) {
1169 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1170 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1172 kfree(bp->tx_ring);
1174 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1175 bp->tx_ring, bp->tx_ring_dma);
1176 bp->tx_ring = NULL;
1177 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1185 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1190 bp->rx_buffers = kzalloc(size, gfp);
1191 if (!bp->rx_buffers)
1195 bp->tx_buffers = kzalloc(size, gfp);
1196 if (!bp->tx_buffers)
1200 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1201 &bp->rx_ring_dma, gfp);
1202 if (!bp->rx_ring) {
1213 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1217 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1223 bp->rx_ring = rx_ring;
1224 bp->rx_ring_dma = rx_ring_dma;
1225 bp->flags |= B44_FLAG_RX_RING_HACK;
1228 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1229 &bp->tx_ring_dma, gfp);
1230 if (!bp->tx_ring) {
1241 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1245 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1251 bp->tx_ring = tx_ring;
1252 bp->tx_ring_dma = tx_ring_dma;
1253 bp->flags |= B44_FLAG_TX_RING_HACK;
1259 b44_free_consistent(bp);
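
b44_alloc_consistent() has a fallback, flagged by the RING_HACK bits above: when dma_alloc_coherent() hands back a ring the 30-bit engine cannot reach, the ring is allocated normally and kept as a streaming mapping, which is why the hot paths keep calling b44_sync_dma_desc_for_cpu()/for_device(). A sketch of the RX half, with the error label assumed:

    rx_ring = kzalloc(DMA_TABLE_BYTES, gfp);
    if (!rx_ring)
        goto out_err;
    rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
        rx_ring_dma + DMA_TABLE_BYTES > DMA_BIT_MASK(30))
        goto out_err;               /* assumed shared error path */

    bp->rx_ring = rx_ring;
    bp->rx_ring_dma = rx_ring_dma;
    bp->flags |= B44_FLAG_RX_RING_HACK;
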
1263 /* bp->lock is held. */
1264 static void b44_clear_stats(struct b44 *bp)
1268 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1270 br32(bp, reg);
1272 br32(bp, reg);
1275 /* bp->lock is held. */
1276 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1278 struct ssb_device *sdev = bp->sdev;
1281 was_enabled = ssb_device_is_enabled(bp->sdev);
1283 ssb_device_enable(bp->sdev, 0);
1287 bw32(bp, B44_RCV_LAZY, 0);
1288 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1289 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1290 bw32(bp, B44_DMATX_CTRL, 0);
1291 bp->tx_prod = bp->tx_cons = 0;
1292 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1293 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1296 bw32(bp, B44_DMARX_CTRL, 0);
1297 bp->rx_prod = bp->rx_cons = 0;
1300 b44_clear_stats(bp);
1311 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1317 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1326 br32(bp, B44_MDIO_CTRL);
1328 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1329 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1330 br32(bp, B44_ENET_CTRL);
1331 bp->flags |= B44_FLAG_EXTERNAL_PHY;
1333 u32 val = br32(bp, B44_DEVCTRL);
1336 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1337 br32(bp, B44_DEVCTRL);
1340 bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1344 /* bp->lock is held. */
1345 static void b44_halt(struct b44 *bp)
1347 b44_disable_ints(bp);
1349 b44_phy_reset(bp);
1351 netdev_info(bp->dev, "powering down PHY\n");
1352 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1355 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1356 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1358 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1361 /* bp->lock is held. */
1362 static void __b44_set_mac_addr(struct b44 *bp)
1364 bw32(bp, B44_CAM_CTRL, 0);
1365 if (!(bp->dev->flags & IFF_PROMISC)) {
1368 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1369 val = br32(bp, B44_CAM_CTRL);
1370 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1376 struct b44 *bp = netdev_priv(dev);
1388 spin_lock_irq(&bp->lock);
1390 val = br32(bp, B44_RXCONFIG);
1392 __b44_set_mac_addr(bp);
1394 spin_unlock_irq(&bp->lock);
1400 * packet processing. Invoked with bp->lock held.
1403 static void b44_init_hw(struct b44 *bp, int reset_kind)
1407 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1409 b44_phy_reset(bp);
1410 b44_setup_phy(bp);
1414 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1415 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1418 __b44_set_rx_mode(bp->dev);
1421 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1422 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1424 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1426 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1429 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1430 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1431 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1433 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1435 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1436 bp->rx_prod = bp->rx_pending;
1438 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1441 val = br32(bp, B44_ENET_CTRL);
1442 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1444 netdev_reset_queue(bp->dev);
1449 struct b44 *bp = netdev_priv(dev);
1452 err = b44_alloc_consistent(bp, GFP_KERNEL);
1456 napi_enable(&bp->napi);
1458 b44_init_rings(bp);
1459 b44_init_hw(bp, B44_FULL_RESET);
1461 b44_check_phy(bp);
1465 napi_disable(&bp->napi);
1466 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1467 b44_free_rings(bp);
1468 b44_free_consistent(bp);
1472 timer_setup(&bp->timer, b44_timer, 0);
1473 bp->timer.expires = jiffies + HZ;
1474 add_timer(&bp->timer);
1476 b44_enable_ints(bp);
1478 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1499 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1505 bw32(bp, B44_FILT_ADDR, table_offset + i);
1506 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1542 static void b44_setup_pseudo_magicp(struct b44 *bp)
1556 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1559 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1560 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1565 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1568 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1570 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1576 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1579 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1581 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1588 bw32(bp, B44_WKUP_LEN, val);
1591 val = br32(bp, B44_DEVCTRL);
1592 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1597 static void b44_setup_wol_pci(struct b44 *bp)
1601 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1602 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1603 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1604 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1608 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1611 static void b44_setup_wol(struct b44 *bp)
1615 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1617 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1619 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1621 val = bp->dev->dev_addr[2] << 24 |
1622 bp->dev->dev_addr[3] << 16 |
1623 bp->dev->dev_addr[4] << 8 |
1624 bp->dev->dev_addr[5];
1625 bw32(bp, B44_ADDR_LO, val);
1627 val = bp->dev->dev_addr[0] << 8 |
1628 bp->dev->dev_addr[1];
1629 bw32(bp, B44_ADDR_HI, val);
1631 val = br32(bp, B44_DEVCTRL);
1632 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1635 b44_setup_pseudo_magicp(bp);
1637 b44_setup_wol_pci(bp);
1642 struct b44 *bp = netdev_priv(dev);
1646 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1649 napi_disable(&bp->napi);
1651 del_timer_sync(&bp->timer);
1653 spin_lock_irq(&bp->lock);
1655 b44_halt(bp);
1656 b44_free_rings(bp);
1659 spin_unlock_irq(&bp->lock);
1663 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1664 b44_init_hw(bp, B44_PARTIAL_RESET);
1665 b44_setup_wol(bp);
1668 b44_free_consistent(bp);
1676 struct b44 *bp = netdev_priv(dev);
1677 struct b44_hw_stats *hwstat = &bp->hw_stats;
1719 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1729 __b44_cam_write(bp, ha->addr, i++ + 1);
1736 struct b44 *bp = netdev_priv(dev);
1739 val = br32(bp, B44_RXCONFIG);
1743 bw32(bp, B44_RXCONFIG, val);
1748 __b44_set_mac_addr(bp);
1754 i = __b44_load_mcast(bp, dev);
1757 __b44_cam_write(bp, zero, i);
1759 bw32(bp, B44_RXCONFIG, val);
1760 val = br32(bp, B44_CAM_CTRL);
1761 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1767 struct b44 *bp = netdev_priv(dev);
1769 spin_lock_irq(&bp->lock);
1771 spin_unlock_irq(&bp->lock);
1776 struct b44 *bp = netdev_priv(dev);
1777 return bp->msg_enable;
1782 struct b44 *bp = netdev_priv(dev);
1783 bp->msg_enable = value;
1788 struct b44 *bp = netdev_priv(dev);
1789 struct ssb_bus *bus = bp->sdev->bus;
1808 struct b44 *bp = netdev_priv(dev);
1812 spin_lock_irq(&bp->lock);
1813 b44_readphy(bp, MII_BMCR, &bmcr);
1814 b44_readphy(bp, MII_BMCR, &bmcr);
1817 b44_writephy(bp, MII_BMCR,
1821 spin_unlock_irq(&bp->lock);
1829 struct b44 *bp = netdev_priv(dev);
1832 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1847 if (bp->flags & B44_FLAG_ADV_10HALF)
1849 if (bp->flags & B44_FLAG_ADV_10FULL)
1851 if (bp->flags & B44_FLAG_ADV_100HALF)
1853 if (bp->flags & B44_FLAG_ADV_100FULL)
1856 cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1858 cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1861 cmd->base.phy_address = bp->phy_addr;
1862 cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1883 struct b44 *bp = netdev_priv(dev);
1888 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1890 spin_lock_irq(&bp->lock);
1892 b44_setup_phy(bp);
1896 spin_unlock_irq(&bp->lock);
1919 spin_lock_irq(&bp->lock);
1922 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1930 bp->flags |= (B44_FLAG_ADV_10HALF |
1936 bp->flags |= B44_FLAG_ADV_10HALF;
1938 bp->flags |= B44_FLAG_ADV_10FULL;
1940 bp->flags |= B44_FLAG_ADV_100HALF;
1942 bp->flags |= B44_FLAG_ADV_100FULL;
1945 bp->flags |= B44_FLAG_FORCE_LINK;
1946 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1948 bp->flags |= B44_FLAG_100_BASE_T;
1950 bp->flags |= B44_FLAG_FULL_DUPLEX;
1954 b44_setup_phy(bp);
1956 spin_unlock_irq(&bp->lock);
1964 struct b44 *bp = netdev_priv(dev);
1967 ering->rx_pending = bp->rx_pending;
1975 struct b44 *bp = netdev_priv(dev);
1983 spin_lock_irq(&bp->lock);
1985 bp->rx_pending = ering->rx_pending;
1986 bp->tx_pending = ering->tx_pending;
1988 b44_halt(bp);
1989 b44_init_rings(bp);
1990 b44_init_hw(bp, B44_FULL_RESET);
1991 netif_wake_queue(bp->dev);
1992 spin_unlock_irq(&bp->lock);
1994 b44_enable_ints(bp);
2002 struct b44 *bp = netdev_priv(dev);
2005 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2007 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
2009 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
2015 struct b44 *bp = netdev_priv(dev);
2017 spin_lock_irq(&bp->lock);
2019 bp->flags |= B44_FLAG_PAUSE_AUTO;
2021 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2023 bp->flags |= B44_FLAG_RX_PAUSE;
2025 bp->flags &= ~B44_FLAG_RX_PAUSE;
2027 bp->flags |= B44_FLAG_TX_PAUSE;
2029 bp->flags &= ~B44_FLAG_TX_PAUSE;
2030 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2031 b44_halt(bp);
2032 b44_init_rings(bp);
2033 b44_init_hw(bp, B44_FULL_RESET);
2035 __b44_set_flow_ctrl(bp, bp->flags);
2037 spin_unlock_irq(&bp->lock);
2039 b44_enable_ints(bp);
2066 struct b44 *bp = netdev_priv(dev);
2067 struct b44_hw_stats *hwstat = &bp->hw_stats;
2072 spin_lock_irq(&bp->lock);
2073 b44_stats_update(bp);
2074 spin_unlock_irq(&bp->lock);
2089 struct b44 *bp = netdev_priv(dev);
2092 if (bp->flags & B44_FLAG_WOL_ENABLE)
2101 struct b44 *bp = netdev_priv(dev);
2103 spin_lock_irq(&bp->lock);
2105 bp->flags |= B44_FLAG_WOL_ENABLE;
2107 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2108 spin_unlock_irq(&bp->lock);
2110 device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2135 struct b44 *bp = netdev_priv(dev);
2141 spin_lock_irq(&bp->lock);
2142 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2146 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2148 spin_unlock_irq(&bp->lock);
2153 static int b44_get_invariants(struct b44 *bp)
2155 struct ssb_device *sdev = bp->sdev;
2159 bp->dma_offset = ssb_dma_translation(sdev);
2164 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2167 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2172 bp->phy_addr &= 0x1F;
2174 memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2176 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2181 bp->imask = IMASK_DEF;
2184 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2187 if (bp->sdev->id.revision >= 7)
2188 bp->flags |= B44_FLAG_B0_ANDLATER;
2211 struct b44 *bp = netdev_priv(dev);
2217 if (bp->old_link != phydev->link) {
2219 bp->old_link = phydev->link;
2225 (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2227 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2229 !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2231 bp->flags |= B44_FLAG_FULL_DUPLEX;
2236 u32 val = br32(bp, B44_TX_CTRL);
2237 if (bp->flags & B44_FLAG_FULL_DUPLEX)
2241 bw32(bp, B44_TX_CTRL, val);
2246 static int b44_register_phy_one(struct b44 *bp)
2250 struct ssb_device *sdev = bp->sdev;
2263 mii_bus->priv = bp;
2268 mii_bus->phy_mask = ~(1 << bp->phy_addr);
2271 bp->mii_bus = mii_bus;
2279 if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2284 bp->phy_addr);
2286 bp->phy_addr = 0;
2288 bp->phy_addr);
2291 bp->phy_addr);
2294 phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2298 bp->phy_addr);
2311 bp->old_link = 0;
2312 bp->phy_addr = phydev->mdio.addr;
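
For boards with an external PHY, b44_register_phy_one() brings up a one-device MDIO bus and attaches phylib. A sketch of the assumed calls around the fragments above; the wrapper names are guesses for the bus->priv read/write hooks seen earlier in the listing:

    mii_bus = mdiobus_alloc();
    mii_bus->priv = bp;
    mii_bus->read = b44_mdio_read_phylib;    /* assumed wrapper names over */
    mii_bus->write = b44_mdio_write_phylib;  /* __b44_readphy()/_writephy() */
    mii_bus->phy_mask = ~(1 << bp->phy_addr); /* probe only our address */

    err = mdiobus_register(mii_bus);

    phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
                         PHY_INTERFACE_MODE_MII);
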
2328 static void b44_unregister_phy_one(struct b44 *bp)
2330 struct net_device *dev = bp->dev;
2331 struct mii_bus *mii_bus = bp->mii_bus;
2342 struct b44 *bp;
2347 dev = alloc_etherdev(sizeof(*bp));
2358 bp = netdev_priv(dev);
2359 bp->sdev = sdev;
2360 bp->dev = dev;
2361 bp->force_copybreak = 0;
2363 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2365 spin_lock_init(&bp->lock);
2366 u64_stats_init(&bp->hw_stats.syncp);
2368 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2369 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2372 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2393 err = b44_get_invariants(bp);
2400 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2406 bp->mii_if.dev = dev;
2407 bp->mii_if.mdio_read = b44_mdio_read_mii;
2408 bp->mii_if.mdio_write = b44_mdio_write_mii;
2409 bp->mii_if.phy_id = bp->phy_addr;
2410 bp->mii_if.phy_id_mask = 0x1f;
2411 bp->mii_if.reg_num_mask = 0x1f;
2414 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2418 bp->flags |= B44_FLAG_PAUSE_AUTO;
2433 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2436 err = b44_phy_reset(bp);
2442 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2443 err = b44_register_phy_one(bp);
2461 netif_napi_del(&bp->napi);
2471 struct b44 *bp = netdev_priv(dev);
2474 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2475 b44_unregister_phy_one(bp);
2478 netif_napi_del(&bp->napi);
2487 struct b44 *bp = netdev_priv(dev);
2492 del_timer_sync(&bp->timer);
2494 spin_lock_irq(&bp->lock);
2496 b44_halt(bp);
2497 netif_carrier_off(bp->dev);
2498 netif_device_detach(bp->dev);
2499 b44_free_rings(bp);
2501 spin_unlock_irq(&bp->lock);
2504 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2505 b44_init_hw(bp, B44_PARTIAL_RESET);
2506 b44_setup_wol(bp);
2516 struct b44 *bp = netdev_priv(dev);
2529 spin_lock_irq(&bp->lock);
2530 b44_init_rings(bp);
2531 b44_init_hw(bp, B44_FULL_RESET);
2532 spin_unlock_irq(&bp->lock);
2542 spin_lock_irq(&bp->lock);
2543 b44_halt(bp);
2544 b44_free_rings(bp);
2545 spin_unlock_irq(&bp->lock);
2549 netif_device_attach(bp->dev);
2551 b44_enable_ints(bp);
2554 mod_timer(&bp->timer, jiffies + 1);