Lines Matching defs:port

7 * Ethernet port config (0x00 is not present on IXP42X):
9 * logical port 0x00 0x10 0x20
14 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
19 * bits 3 -> 4 - port ID (user-set?)
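The header comment fragments above (file lines 7-19) describe how a logical port ID encodes the owning NPE and which queue-manager queues each port uses. As a reading aid, here is a minimal sketch of that mapping, with macro names matching those used later in this listing (NPE_ID, PHYSICAL_ID, TX_QUEUE, RXFREE_QUEUE, TXDONE_QUEUE) and the definitions reconstructed from the comment's table rather than copied from the file:

    /* logical ports 0x00 / 0x10 / 0x20 -> NPE 0 / 1 / 2 (upper nibble) */
    #define NPE_ID(port_id)       ((port_id) >> 4)
    /* "physical PortId" column of the table: 2, 0, 1 */
    #define PHYSICAL_ID(port_id)  ((NPE_ID(port_id) + 2) % 3)
    /* per-port TX queues 23..25 and RX-free queues 26..28 */
    #define TX_QUEUE(port_id)     (NPE_ID(port_id) + 23)
    #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
    /* the TX-done completion queue (31) is shared by all ports */
    #define TXDONE_QUEUE          31

So port 0x20, for example, is served by NPE-C, appears as physical port 1, transmits through queue 25 and refills its RX buffers through queue 28.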
184 struct port {
195 int id; /* logical port ID */
251 #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
253 #define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
255 #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
257 #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
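The four descriptor macros above index a single per-port descriptor table: RX descriptors first, TX descriptors after them, all carved out of one DMA pool allocation (see init_queues() at file line 1118 further down). A short sketch of that layout; the ring sizes are assumptions for illustration, the file defines its own RX_DESCS/TX_DESCS:

    #define RX_DESCS 64     /* assumed RX ring size */
    #define TX_DESCS 16     /* assumed TX ring size */
    /* one coherent block: RX_DESCS descriptors followed by TX_DESCS */
    #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))

    /* The *_phys macros hand bus addresses to the queue manager, while the
     * *_ptr macros give the matching CPU pointers into desc_tab[]:
     *   rx_desc_phys(port, n) == desc_tab_phys + n * sizeof(struct desc)
     *   tx_desc_phys(port, n) == desc_tab_phys + (n + RX_DESCS) * sizeof(struct desc)
     */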
273 static struct port *npe_port_tab[MAX_NPES];
301 static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
309 if (!port->hwts_rx_en)
312 ch = PORT2CHANNEL(port);
314 regs = port->timesync_regs;
343 static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
352 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
357 ch = PORT2CHANNEL(port);
359 regs = port->timesync_regs;
393 struct port *port = netdev_priv(netdev);
400 ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
404 ch = PORT2CHANNEL(port);
405 regs = port->timesync_regs;
412 port->hwts_rx_en = 0;
415 port->hwts_rx_en = PTP_SLAVE_MODE;
419 port->hwts_rx_en = PTP_MASTER_MODE;
426 port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
438 struct port *port = netdev_priv(netdev);
441 cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
443 switch (port->hwts_rx_en) {
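hwtstamp_set()/hwtstamp_get() above back the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls: hwts_tx_en is a plain on/off flag, while hwts_rx_en records whether the port timestamps as a PTP slave or master. A minimal userspace sketch of driving that interface; the interface name and the particular RX filter are illustrative choices, not taken from this file:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int enable_hw_tstamp(int sock)  /* an AF_INET/SOCK_DGRAM socket */
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;                   /* -> hwts_tx_en */
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; /* -> PTP slave mode */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* illustrative name */
        ifr.ifr_data = (char *)&cfg;

        return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }

Callers should check the return value; RX filters the NPE firmware cannot timestamp are expected to be rejected.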
572 struct port *port = netdev_priv(dev);
576 if (port->speed) {
577 port->speed = 0;
583 if (port->speed == phydev->speed && port->duplex == phydev->duplex)
586 port->speed = phydev->speed;
587 port->duplex = phydev->duplex;
589 if (port->duplex)
591 &port->regs->tx_control[0]);
594 &port->regs->tx_control[0]);
597 dev->name, port->speed, port->duplex ? "full" : "half");
635 static inline int queue_get_desc(unsigned int queue, struct port *port,
645 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
646 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
665 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
668 dma_unmap_single(&port->netdev->dev, desc->data,
671 dma_unmap_single(&port->netdev->dev, desc->data & ~3,
681 struct port *port = netdev_priv(dev);
686 qmgr_disable_irq(port->plat->rxq);
687 napi_schedule(&port->napi);
692 struct port *port = container_of(napi, struct port, napi);
693 struct net_device *dev = port->netdev;
694 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
710 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
730 desc = rx_desc_ptr(port, n);
751 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
758 skb = port->rx_buff_tab[n];
764 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
772 ixp_rx_timestamp(port, skb);
780 port->rx_buff_tab[n] = temp;
785 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
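eth_rx_irq() (file lines 681-687) masks the RX queue interrupt and schedules NAPI; eth_poll() then pulls descriptors from the queue manager and recycles each buffer onto the RX-free queue, as the fragments above show. What the match list leaves out is the completion path, which must re-enable the queue interrupt without stranding a packet that arrives in between. A hedged skeleton of that handoff, using the qmgr helpers already visible in this listing; the file's real eth_poll() additionally handles DMA mapping, byte swapping and RX timestamping:

    static int eth_poll(struct napi_struct *napi, int budget)
    {
        struct port *port = container_of(napi, struct port, napi);
        unsigned int rxq = port->plat->rxq;
        int received = 0, n;

        while (received < budget) {
            if ((n = queue_get_desc(rxq, port, 0)) < 0) {
                /* Queue ran dry: stop polling and unmask the IRQ,
                 * then recheck so a racing packet is not lost. */
                napi_complete(napi);
                qmgr_enable_irq(rxq);
                if (!qmgr_stat_empty(rxq) && napi_schedule_prep(napi)) {
                    qmgr_disable_irq(rxq);
                    __napi_schedule(napi);
                    continue;       /* keep draining in this call */
                }
                return received;
            }
            /* ... pass skb n up the stack, refill the RX-free queue ... */
            received++;
        }
        return received;            /* budget used up, NAPI stays scheduled */
    }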
805 struct port *port;
811 port = npe_port_tab[npe_id];
812 BUG_ON(!port);
814 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
816 desc = tx_desc_ptr(port, n_desc);
819 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
820 port->netdev->stats.tx_packets++;
821 port->netdev->stats.tx_bytes += desc->pkt_len;
823 dma_unmap_tx(port, desc);
826 port->netdev->name, port->tx_buff_tab[n_desc]);
828 free_buffer_irq(port->tx_buff_tab[n_desc]);
829 port->tx_buff_tab[n_desc] = NULL;
832 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
833 queue_put_desc(port->plat->txreadyq, phys, desc);
837 port->netdev->name);
839 netif_wake_queue(port->netdev);
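eth_txdone_irq() above looks up the completing port in npe_port_tab[] and turns the descriptor's bus address back into an index (file line 814), but the matched lines do not show where npe_id and phys come from. Following the bit layout in the header comment (bits 0-1 carry the NPE ID, bits 5-31 the physical descriptor address), decoding one TX-done queue entry looks roughly like this; the mask values are inferred from that comment, not copied from the file:

    u32 entry, phys;
    unsigned int npe_id;
    struct port *port;
    int n_desc;

    entry = qmgr_get_entry(TXDONE_QUEUE);   /* one completion per entry */
    npe_id = entry & 3;                     /* bits 0-1: which NPE */
    phys = entry & ~0x1f;                   /* bits 5-31: descriptor address */
    port = npe_port_tab[npe_id];
    n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);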
846 struct port *port = netdev_priv(dev);
847 unsigned int txreadyq = port->plat->txreadyq;
891 n = queue_get_desc(txreadyq, port, 1);
893 desc = tx_desc_ptr(port, n);
896 port->tx_buff_tab[n] = skb;
898 port->tx_buff_tab[n] = mem;
905 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
926 ixp_tx_timestamp(port, skb);
938 struct port *port = netdev_priv(dev);
946 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
947 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
950 &port->regs->rx_control[0]);
956 &port->regs->rx_control[0]);
971 __raw_writel(addr[i], &port->regs->mcast_addr[i]);
972 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
976 &port->regs->rx_control[0]);
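The MAC filters multicast traffic with a single address/mask register pair: mcast_addr holds a reference address and mcast_mask marks which bits must match (file lines 971-972 write addr[i] and ~diffs[i]). A sketch of how such a combined filter can be built from the device's multicast list; the helper name and locals are illustrative and the file's own loop may differ in detail:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* Collapse all subscribed multicast addresses into one addr/mask pair:
     * any bit position that differs between two addresses is masked out. */
    static void build_mcast_filter(struct net_device *dev,
                                   u8 addr[ETH_ALEN], u8 mask[ETH_ALEN])
    {
        struct netdev_hw_addr *ha;
        u8 diffs[ETH_ALEN] = { 0 };
        const u8 *first = NULL;
        int i;

        netdev_for_each_mc_addr(ha, dev) {
            if (!first)
                first = ha->addr;           /* reference address */
            for (i = 0; i < ETH_ALEN; i++)
                diffs[i] |= first[i] ^ ha->addr[i];
        }
        for (i = 0; i < ETH_ALEN; i++) {
            addr[i] = first ? first[i] : 0;
            mask[i] = ~diffs[i];            /* 1 = bit must match */
        }
    }

The filter is deliberately loose: it admits every subscribed group, plus possibly some extra groups whose addresses happen to match, which the stack then drops in software.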
1000 struct port *port = netdev_priv(dev);
1004 port->firmware[0], port->firmware[1],
1005 port->firmware[2], port->firmware[3]);
1012 struct port *port = netdev_priv(dev);
1014 if (port->phc_index < 0)
1015 ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
1017 info->phc_index = port->phc_index;
1050 static int request_queues(struct port *port)
1054 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
1055 "%s:RX-free", port->netdev->name);
1059 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
1060 "%s:RX", port->netdev->name);
1064 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
1065 "%s:TX", port->netdev->name);
1069 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
1070 "%s:TX-ready", port->netdev->name);
1084 qmgr_release_queue(port->plat->txreadyq);
1086 qmgr_release_queue(TX_QUEUE(port->id));
1088 qmgr_release_queue(port->plat->rxq);
1090 qmgr_release_queue(RXFREE_QUEUE(port->id));
1092 port->netdev->name);
1096 static void release_queues(struct port *port)
1098 qmgr_release_queue(RXFREE_QUEUE(port->id));
1099 qmgr_release_queue(port->plat->rxq);
1100 qmgr_release_queue(TX_QUEUE(port->id));
1101 qmgr_release_queue(port->plat->txreadyq);
1107 static int init_queues(struct port *port)
1112 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
1118 port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
1119 if (!port->desc_tab)
1121 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
1122 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
1126 struct desc *desc = rx_desc_ptr(port, i);
1130 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
1139 desc->data = dma_map_single(&port->netdev->dev, data,
1141 if (dma_mapping_error(&port->netdev->dev, desc->data)) {
1146 port->rx_buff_tab[i] = buff;
1152 static void destroy_queues(struct port *port)
1156 if (port->desc_tab) {
1158 struct desc *desc = rx_desc_ptr(port, i);
1159 buffer_t *buff = port->rx_buff_tab[i];
1161 dma_unmap_single(&port->netdev->dev,
1168 struct desc *desc = tx_desc_ptr(port, i);
1169 buffer_t *buff = port->tx_buff_tab[i];
1171 dma_unmap_tx(port, desc);
1175 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
1176 port->desc_tab = NULL;
1187 struct port *port = netdev_priv(dev);
1188 struct npe *npe = port->npe;
1201 port->firmware[0] = msg.byte4;
1202 port->firmware[1] = msg.byte5;
1203 port->firmware[2] = msg.byte6;
1204 port->firmware[3] = msg.byte7;
1209 msg.eth_id = port->id;
1210 msg.byte5 = port->plat->rxq | 0x80;
1211 msg.byte7 = port->plat->rxq << 4;
1214 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
1219 msg.eth_id = PHYSICAL_ID(port->id);
1226 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
1231 msg.eth_id = port->id;
1232 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
1235 if ((err = request_queues(port)) != 0)
1238 if ((err = init_queues(port)) != 0) {
1239 destroy_queues(port);
1240 release_queues(port);
1244 port->speed = 0; /* force "link up" message */
1248 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
1249 __raw_writel(0x08, &port->regs->random_seed);
1250 __raw_writel(0x12, &port->regs->partial_empty_threshold);
1251 __raw_writel(0x30, &port->regs->partial_full_threshold);
1252 __raw_writel(0x08, &port->regs->tx_start_bytes);
1253 __raw_writel(0x15, &port->regs->tx_deferral);
1254 __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
1255 __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
1256 __raw_writel(0x80, &port->regs->slot_time);
1257 __raw_writel(0x01, &port->regs->int_clock_threshold);
1261 queue_put_desc(port->plat->txreadyq,
1262 tx_desc_phys(port, i), tx_desc_ptr(port, i));
1265 queue_put_desc(RXFREE_QUEUE(port->id),
1266 rx_desc_phys(port, i), rx_desc_ptr(port, i));
1268 __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
1269 __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
1270 __raw_writel(0, &port->regs->rx_control[1]);
1271 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1273 napi_enable(&port->napi);
1277 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1286 napi_schedule(&port->napi);
1292 struct port *port = netdev_priv(dev);
1298 qmgr_disable_irq(port->plat->rxq);
1299 napi_disable(&port->napi);
1302 while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
1307 msg.eth_id = port->id;
1309 if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
1314 while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
1318 if (qmgr_stat_empty(TX_QUEUE(port->id))) {
1322 int n = queue_get_desc(port->plat->txreadyq, port, 1);
1324 desc = tx_desc_ptr(port, n);
1325 phys = tx_desc_phys(port, n);
1328 queue_put_desc(TX_QUEUE(port->id), phys, desc);
1342 while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
1347 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1362 if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
1369 destroy_queues(port);
1370 release_queues(port);
1449 struct port *port;
1456 if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
1460 port = netdev_priv(ndev);
1461 port->netdev = ndev;
1462 port->id = plat->npe;
1463 port->phc_index = -1;
1465 /* Get the port resource and remap */
1466 port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
1467 if (IS_ERR(port->regs))
1468 return PTR_ERR(port->regs);
1472 err = ixp4xx_mdio_register(port->regs);
1491 netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
1493 if (!(port->npe = npe_request(NPE_ID(port->id))))
1496 port->plat = plat;
1497 npe_port_tab[NPE_ID(port->id)] = port;
1506 &port->regs->core_control);
1508 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1524 npe_name(port->npe));
1531 npe_port_tab[NPE_ID(port->id)] = NULL;
1532 npe_release(port->npe);
1540 struct port *port = netdev_priv(ndev);
1545 npe_port_tab[NPE_ID(port->id)] = NULL;
1546 npe_release(port->npe);