Lines matching refs:port

3  * Intel IXP4xx HSS (synchronous serial port) driver for Linux
202 #define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
254 struct port {
312 #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
314 #define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
316 #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
318 #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
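
The four accessor macros above (their continuation lines are cut off in this listing) carve one DMA-coherent descriptor table into an RX region followed by a TX region: RX descriptors occupy slots 0..RX_DESCS-1 and TX descriptors start at slot RX_DESCS. A minimal userspace sketch of that layout arithmetic, with hypothetical counts and a simplified struct desc:

    #include <stdio.h>

    #define RX_DESCS 4              /* hypothetical counts, for illustration only */
    #define TX_DESCS 4

    struct desc {                   /* simplified stand-in for the driver's descriptor */
        unsigned int next, buf_len, pkt_len, data;
    };

    /* same arithmetic as the rx_desc_phys()/tx_desc_phys() macros above */
    static unsigned int rx_phys(unsigned int tab_phys, unsigned int n)
    {
        return tab_phys + n * sizeof(struct desc);
    }

    static unsigned int tx_phys(unsigned int tab_phys, unsigned int n)
    {
        return tab_phys + (n + RX_DESCS) * sizeof(struct desc);
    }

    int main(void)
    {
        unsigned int tab_phys = 0x10000000;     /* hypothetical table base address */

        printf("rx[0]=0x%x  rx[last]=0x%x  tx[0]=0x%x\n",
               rx_phys(tab_phys, 0), rx_phys(tab_phys, RX_DESCS - 1),
               tx_phys(tab_phys, 0));
        return 0;
    }
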
340 static inline struct port* dev_to_port(struct net_device *dev)
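
The body of dev_to_port() is not shown here, but since the probe code below (line 1345) creates the device with alloc_hdlcdev(port), the helper presumably just fetches the driver state back out of the generic-HDLC private pointer. A hedged sketch of that one-liner, assuming the usual <linux/hdlc.h> convention:

    static inline struct port *dev_to_port(struct net_device *dev)
    {
        return dev_to_hdlc(dev)->priv;  /* priv was set by alloc_hdlcdev(port) */
    }
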
358 static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
361 if (npe_send_message(port->npe, msg, what)) {
363 port->id, val[0], val[1], npe_name(port->npe));
368 static void hss_config_set_lut(struct port *port)
375 msg.hss_port = port->id;
383 hss_npe_send(port, &msg, "HSS_SET_TX_LUT");
386 hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
391 static void hss_config(struct port *port)
397 msg.hss_port = port->id;
401 if (port->clock_type == CLOCK_INT)
403 hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
407 hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
411 msg.hss_port = port->id;
413 msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
414 (port->id ? CCR_SECOND_HSS : 0);
415 hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
419 msg.hss_port = port->id;
421 msg.data32 = port->clock_reg;
422 hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
426 msg.hss_port = port->id;
430 hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
434 msg.hss_port = port->id;
438 hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
440 hss_config_set_lut(port);
444 msg.hss_port = port->id;
445 hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");
447 if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
448 /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
450 pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
455 npe_recv_message(port->npe, &msg, "FLUSH_IT");
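
Taken together, the hss_config() and hss_config_set_lut() fragments trace a fixed sequence of NPE mailbox commands, ending with a LOAD_CONFIG whose response is read back and sanity-checked. A compact, illustrative summary of that order (the real code fills in a struct msg for each step):

    static const char * const hss_config_steps[] = {
        "HSS_SET_TX_PCR",       /* port configuration registers */
        "HSS_SET_RX_PCR",
        "HSS_SET_CORE_CR",      /* loopback and second-HSS flags */
        "HSS_SET_CLOCK_CR",     /* clock_reg derived from the requested rate */
        "HSS_SET_TX_FCR",
        "HSS_SET_RX_FCR",
        "HSS_SET_TX_LUT",       /* via hss_config_set_lut() */
        "HSS_SET_RX_LUT",
        "HSS_LOAD_CONFIG",      /* response checked; port #1 reports port_id #4 */
    };
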
458 static void hss_set_hdlc_cfg(struct port *port)
464 msg.hss_port = port->id;
465 msg.data8a = port->hdlc_cfg; /* rx_cfg */
466 msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
467 hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
470 static u32 hss_get_status(struct port *port)
476 msg.hss_port = port->id;
477 hss_npe_send(port, &msg, "PORT_ERROR_READ");
478 if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
479 pr_crit("HSS-%i: unable to read HSS status\n", port->id);
486 static void hss_start_hdlc(struct port *port)
492 msg.hss_port = port->id;
494 hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
497 static void hss_stop_hdlc(struct port *port)
503 msg.hss_port = port->id;
504 hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
505 hss_get_status(port); /* make sure it's halted */
508 static int hss_load_firmware(struct port *port)
513 if (port->initialized)
516 if (!npe_running(port->npe) &&
517 (err = npe_load_firmware(port->npe, npe_name(port->npe),
518 port->dev)))
524 msg.hss_port = port->id;
526 hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
530 hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
536 hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
540 hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
544 hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
546 port->initialized = 1;
580 static inline int queue_get_desc(unsigned int queue, struct port *port,
590 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
591 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
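
queue_get_desc() gets a physical descriptor address back from the queue manager and must turn it into a table index; only the base-selection lines appear in this listing, but the mapping is presumably a plain offset division against the bases above. A tiny userspace illustration (the address and descriptor size are placeholders):

    #include <stdio.h>

    #define DESC_SIZE 16            /* hypothetical sizeof(struct desc) */

    int main(void)
    {
        unsigned int tab_phys = 0x10000000;             /* hypothetical table base */
        unsigned int phys = tab_phys + 3 * DESC_SIZE;   /* address popped from the queue */

        printf("descriptor index %u\n", (phys - tab_phys) / DESC_SIZE);  /* prints 3 */
        return 0;
    }
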
610 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
613 dma_unmap_single(&port->netdev->dev, desc->data,
616 dma_unmap_single(&port->netdev->dev, desc->data & ~3,
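
In the second dma_unmap_single() call above, desc->data is masked with ~3 before unmapping; presumably desc->data carries the mapped buffer address plus the frame's sub-word offset, so clearing the low two bits recovers the 4-byte-aligned address that was originally handed to dma_map_single(). The masking itself, as a trivial illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long data = 0x20001236;    /* hypothetical mapped address + 2-byte offset */

        printf("0x%lx -> 0x%lx\n", data, data & ~3UL);  /* 0x20001236 -> 0x20001234 */
        return 0;
    }
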
626 struct port *port = dev_to_port(netdev);
630 port->carrier = carrier;
631 if (!port->loopback) {
643 struct port *port = dev_to_port(dev);
648 qmgr_disable_irq(queue_ids[port->id].rx);
649 napi_schedule(&port->napi);
654 struct port *port = container_of(napi, struct port, napi);
655 struct net_device *dev = port->netdev;
656 unsigned int rxq = queue_ids[port->id].rx;
657 unsigned int rxfreeq = queue_ids[port->id].rxfree;
673 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
697 desc = rx_desc_ptr(port, n);
746 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
753 skb = port->rx_buff_tab[n];
759 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
773 port->rx_buff_tab[n] = temp;
778 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
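
Line 759 in the poll loop copies the received frame with memcpy_swab32(), an IXP4xx platform helper whose name indicates a word-at-a-time copy that byte-swaps each 32-bit word (the NPE's data ordering differs from little-endian ARM). A self-contained userspace sketch of that kind of copy, purely for illustration:

    #include <stdio.h>

    /* copy cnt 32-bit words, byte-swapping each one (assumes 32-bit unsigned int) */
    static void swab32_copy(unsigned int *dest, const unsigned int *src, int cnt)
    {
        while (cnt--) {
            unsigned int x = *src++;

            *dest++ = (x << 24) | ((x & 0xff00) << 8) |
                      ((x >> 8) & 0xff00) | (x >> 24);
        }
    }

    int main(void)
    {
        unsigned int in[2] = { 0x11223344, 0xaabbccdd }, out[2];

        swab32_copy(out, in, 2);
        printf("0x%08x 0x%08x\n", out[0], out[1]);  /* 0x44332211 0xddccbbaa */
        return 0;
    }
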
791 struct port *port = dev_to_port(dev);
797 while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
798 port, 1)) >= 0) {
802 desc = tx_desc_ptr(port, n_desc);
807 dma_unmap_tx(port, desc);
810 dev->name, port->tx_buff_tab[n_desc]);
812 free_buffer_irq(port->tx_buff_tab[n_desc]);
813 port->tx_buff_tab[n_desc] = NULL;
815 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
816 queue_put_desc(port->plat->txreadyq,
817 tx_desc_phys(port, n_desc), desc);
830 struct port *port = dev_to_port(dev);
831 unsigned int txreadyq = port->plat->txreadyq;
877 n = queue_get_desc(txreadyq, port, 1);
879 desc = tx_desc_ptr(port, n);
882 port->tx_buff_tab[n] = skb;
884 port->tx_buff_tab[n] = mem;
890 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
914 static int request_hdlc_queues(struct port *port)
918 err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
919 "%s:RX-free", port->netdev->name);
923 err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
924 "%s:RX", port->netdev->name);
928 err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
929 "%s:TX", port->netdev->name);
933 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
934 "%s:TX-ready", port->netdev->name);
938 err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
939 "%s:TX-done", port->netdev->name);
945 qmgr_release_queue(port->plat->txreadyq);
947 qmgr_release_queue(queue_ids[port->id].tx);
949 qmgr_release_queue(queue_ids[port->id].rx);
951 qmgr_release_queue(queue_ids[port->id].rxfree);
953 port->netdev->name);
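
The request_hdlc_queues() fragments show the usual acquire-in-order, unwind-in-reverse error ladder: whichever qmgr_request_queue() call fails, only the queues already obtained are released (lines 945-951). A generic, self-contained sketch of the pattern with illustrative resource names:

    #include <stdio.h>

    static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
    static void release(const char *name) { printf("release %s\n", name); }

    static int setup(void)
    {
        if (acquire("RX-free"))
            return -1;
        if (acquire("RX"))
            goto rel_rxfree;
        if (acquire("TX"))
            goto rel_rx;
        if (acquire("TX-ready"))
            goto rel_tx;
        if (acquire("TX-done"))
            goto rel_txready;
        return 0;               /* all five queues held */

    rel_txready:
        release("TX-ready");
    rel_tx:
        release("TX");
    rel_rx:
        release("RX");
    rel_rxfree:
        release("RX-free");
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }
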
957 static void release_hdlc_queues(struct port *port)
959 qmgr_release_queue(queue_ids[port->id].rxfree);
960 qmgr_release_queue(queue_ids[port->id].rx);
961 qmgr_release_queue(queue_ids[port->id].txdone);
962 qmgr_release_queue(queue_ids[port->id].tx);
963 qmgr_release_queue(port->plat->txreadyq);
966 static int init_hdlc_queues(struct port *port)
971 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
977 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
978 &port->desc_tab_phys)))
980 memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
981 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
982 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
986 struct desc *desc = rx_desc_ptr(port, i);
990 if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
999 desc->data = dma_map_single(&port->netdev->dev, data,
1001 if (dma_mapping_error(&port->netdev->dev, desc->data)) {
1005 port->rx_buff_tab[i] = buff;
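
A condensed, hedged sketch of the allocation flow the init_hdlc_queues() fragments suggest: create a DMA pool on first use, carve the coherent descriptor table from it, then give each RX descriptor a freshly mapped receive buffer. Error unwinding, the ARMEB/little-endian buffer differences, and the pool's alignment arguments are assumed or omitted here:

    /* sketch only -- relies on the driver's own types, macros and constants */
    static struct dma_pool *dma_pool;

    static int init_rx_ring_sketch(struct port *port)
    {
        int i;

        if (!dma_pool)
            dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
                                       POOL_ALLOC_SIZE, 32, 0);
        if (!dma_pool)
            return -ENOMEM;

        port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
                                        &port->desc_tab_phys);
        if (!port->desc_tab)
            return -ENOMEM;
        memset(port->desc_tab, 0, POOL_ALLOC_SIZE);

        for (i = 0; i < RX_DESCS; i++) {
            struct desc *desc = rx_desc_ptr(port, i);
            struct sk_buff *skb = netdev_alloc_skb(port->netdev, RX_SIZE);

            if (!skb)
                return -ENOMEM;         /* the real code unwinds instead */
            desc->data = dma_map_single(&port->netdev->dev, skb->data,
                                        RX_SIZE, DMA_FROM_DEVICE);
            if (dma_mapping_error(&port->netdev->dev, desc->data))
                return -EIO;            /* likewise */
            port->rx_buff_tab[i] = skb;
        }
        return 0;
    }
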
1011 static void destroy_hdlc_queues(struct port *port)
1015 if (port->desc_tab) {
1017 struct desc *desc = rx_desc_ptr(port, i);
1018 buffer_t *buff = port->rx_buff_tab[i];
1020 dma_unmap_single(&port->netdev->dev,
1027 struct desc *desc = tx_desc_ptr(port, i);
1028 buffer_t *buff = port->tx_buff_tab[i];
1030 dma_unmap_tx(port, desc);
1034 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
1035 port->desc_tab = NULL;
1046 struct port *port = dev_to_port(dev);
1053 if ((err = hss_load_firmware(port)))
1056 if ((err = request_hdlc_queues(port)))
1059 if ((err = init_hdlc_queues(port)))
1063 if (port->plat->open)
1064 if ((err = port->plat->open(port->id, dev,
1071 queue_put_desc(port->plat->txreadyq,
1072 tx_desc_phys(port, i), tx_desc_ptr(port, i));
1075 queue_put_desc(queue_ids[port->id].rxfree,
1076 rx_desc_phys(port, i), rx_desc_ptr(port, i));
1078 napi_enable(&port->napi);
1081 qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
1084 qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
1086 qmgr_enable_irq(queue_ids[port->id].txdone);
1090 hss_set_hdlc_cfg(port);
1091 hss_config(port);
1093 hss_start_hdlc(port);
1096 napi_schedule(&port->napi);
1102 destroy_hdlc_queues(port);
1103 release_hdlc_queues(port);
1111 struct port *port = dev_to_port(dev);
1117 qmgr_disable_irq(queue_ids[port->id].rx);
1119 napi_disable(&port->napi);
1121 hss_stop_hdlc(port);
1123 while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
1125 while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
1133 while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
1138 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1151 qmgr_disable_irq(queue_ids[port->id].txdone);
1153 if (port->plat->close)
1154 port->plat->close(port->id, dev);
1157 destroy_hdlc_queues(port);
1158 release_hdlc_queues(port);
1167 struct port *port = dev_to_port(dev);
1174 port->hdlc_cfg = 0;
1178 port->hdlc_cfg = PKT_HDLC_CRC_32;
1251 struct port *port = dev_to_port(dev);
1266 new_line.clock_type = port->clock_type;
1267 new_line.clock_rate = port->clock_rate;
1268 new_line.loopback = port->loopback;
1281 if (port->plat->set_clock)
1282 clk = port->plat->set_clock(port->id, clk);
1290 port->clock_type = clk; /* Update settings */
1292 find_best_clock(port->plat->timer_freq,
1294 &port->clock_rate, &port->clock_reg);
1296 port->clock_rate = 0;
1297 port->clock_reg = CLK42X_SPEED_2048KHZ;
1299 port->loopback = new_line.loopback;
1304 hss_config(port);
1306 if (port->loopback || port->carrier)
1307 netif_carrier_on(port->netdev);
1309 netif_carrier_off(port->netdev);
1332 struct port *port;
1337 if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
1340 if ((port->npe = npe_request(0)) == NULL) {
1345 if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
1356 port->clock_type = CLOCK_EXT;
1357 port->clock_rate = 0;
1358 port->clock_reg = CLK42X_SPEED_2048KHZ;
1359 port->id = pdev->id;
1360 port->dev = &pdev->dev;
1361 port->plat = pdev->dev.platform_data;
1362 netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
1367 platform_set_drvdata(pdev, port);
1375 npe_release(port->npe);
1377 kfree(port);
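
The probe fragments show the standard generic-HDLC bring-up: allocate the HDLC netdev with the port as its private pointer, fill in the port defaults, add the NAPI context, and register. A hedged sketch of the registration step; the callback names (hss_hdlc_attach, hss_hdlc_xmit) and the error handling are assumptions, not taken from this listing:

    /* sketch only -- assumes the surrounding probe context */
    struct net_device *dev;
    hdlc_device *hdlc;
    int err;

    dev = alloc_hdlcdev(port);          /* stores port in dev_to_hdlc(dev)->priv */
    hdlc = dev_to_hdlc(dev);
    hdlc->attach = hss_hdlc_attach;     /* name assumed; sets port->hdlc_cfg (CRC mode) */
    hdlc->xmit = hss_hdlc_xmit;         /* name assumed */
    netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
    err = register_hdlc_device(dev);    /* on failure: free_netdev(), npe_release(), kfree() */
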
1383 struct port *port = platform_get_drvdata(pdev);
1385 unregister_hdlc_device(port->netdev);
1386 free_netdev(port->netdev);
1387 npe_release(port->npe);
1388 kfree(port);