Lines matching refs: qca
(Each entry below shows the source line number followed by the matching line; the fragments appear to come from the QCA7000 UART serdev Ethernet driver, drivers/net/ethernet/qualcomm/qca_uart.c.)
65 struct qcauart *qca = serdev_device_get_drvdata(serdev);
66 struct net_device *netdev = qca->net_dev;
70 if (!qca->rx_skb) {
71 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
74 if (!qca->rx_skb) {
84 retcode = qcafrm_fsm_decode(&qca->frm_handle,
85 qca->rx_skb->data,
86 skb_tailroom(qca->rx_skb),
106 skb_put(qca->rx_skb, retcode);
107 qca->rx_skb->protocol = eth_type_trans(
108 qca->rx_skb, qca->rx_skb->dev);
109 skb_checksum_none_assert(qca->rx_skb);
110 netif_rx(qca->rx_skb);
111 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
114 if (!qca->rx_skb) {
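File lines 65-114 above come from the serdev receive callback: incoming UART bytes are fed one at a time into the QCA7000 framing state machine, and whenever a complete frame has been decoded it is handed to the network stack and a fresh receive skb is allocated. A minimal sketch of that loop, assuming the callback signature of recent serdev kernels and the qcafrm helpers from qca_7k_common; the per-error accounting of the real driver is abbreviated to a single branch here:

	static size_t qca_tty_receive(struct serdev_device *serdev, const u8 *data,
				      size_t count)
	{
		struct qcauart *qca = serdev_device_get_drvdata(serdev);
		struct net_device *netdev = qca->net_dev;
		struct net_device_stats *n_stats = &netdev->stats;
		size_t i;

		/* Lazily allocate the receive skb the decoder writes into. */
		if (!qca->rx_skb) {
			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
								netdev->mtu +
								VLAN_ETH_HLEN);
			if (!qca->rx_skb) {
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				return 0;
			}
		}

		for (i = 0; i < count; i++) {
			s32 retcode;

			/* Feed one byte into the framing FSM; a positive return
			 * is the length of a fully decoded frame, negative values
			 * are QCAFRM_* framing errors (counted individually in
			 * the real driver).
			 */
			retcode = qcafrm_fsm_decode(&qca->frm_handle,
						    qca->rx_skb->data,
						    skb_tailroom(qca->rx_skb),
						    data[i]);
			if (retcode > 0) {
				n_stats->rx_packets++;
				n_stats->rx_bytes += retcode;
				skb_put(qca->rx_skb, retcode);
				qca->rx_skb->protocol = eth_type_trans(qca->rx_skb,
								       qca->rx_skb->dev);
				skb_checksum_none_assert(qca->rx_skb);
				netif_rx(qca->rx_skb);

				/* Prepare the next receive buffer. */
				qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
									netdev->mtu +
									VLAN_ETH_HLEN);
				if (!qca->rx_skb) {
					n_stats->rx_errors++;
					return i;
				}
			}
		}

		return i;
	}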
128 struct qcauart *qca = container_of(work, struct qcauart, tx_work);
129 struct net_device_stats *n_stats = &qca->net_dev->stats;
132 spin_lock_bh(&qca->lock);
135 if (!netif_running(qca->net_dev)) {
136 spin_unlock_bh(&qca->lock);
140 if (qca->tx_left <= 0) {
145 spin_unlock_bh(&qca->lock);
146 netif_wake_queue(qca->net_dev);
150 written = serdev_device_write_buf(qca->serdev, qca->tx_head,
151 qca->tx_left);
153 qca->tx_left -= written;
154 qca->tx_head += written;
156 spin_unlock_bh(&qca->lock);
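File lines 128-156 are the deferred transmit worker: under qca->lock it first checks that the interface is still up; if the previously encoded frame has been written completely it bumps the packet counter and wakes the queue, otherwise it pushes the remaining tx_left bytes at tx_head into the serdev. A sketch of the whole worker, with the lines that do not match "qca" filled in as assumptions:

	/* Drain the remainder of the encoded frame into the serial device.
	 * Scheduled from the serdev write_wakeup callback.
	 */
	static void qcauart_transmit(struct work_struct *work)
	{
		struct qcauart *qca = container_of(work, struct qcauart, tx_work);
		struct net_device_stats *n_stats = &qca->net_dev->stats;
		int written;

		spin_lock_bh(&qca->lock);

		/* Nothing to do if the interface went down meanwhile. */
		if (!netif_running(qca->net_dev)) {
			spin_unlock_bh(&qca->lock);
			return;
		}

		if (qca->tx_left <= 0) {
			/* Frame fully written: account it and reopen the queue. */
			n_stats->tx_packets++;
			spin_unlock_bh(&qca->lock);
			netif_wake_queue(qca->net_dev);
			return;
		}

		written = serdev_device_write_buf(qca->serdev, qca->tx_head,
						  qca->tx_left);
		if (written > 0) {
			qca->tx_left -= written;
			qca->tx_head += written;
		}
		spin_unlock_bh(&qca->lock);
	}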
164 struct qcauart *qca = serdev_device_get_drvdata(serdev);
166 schedule_work(&qca->tx_work);
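File lines 164-166 are the serdev write-wakeup callback, which simply reschedules the worker above. Together with the receive callback it is hooked up through the serdev client ops; a sketch (the ops struct name is an assumption):

	/* Called by the serdev core when there is room for more TX data. */
	static void qca_tty_wakeup(struct serdev_device *serdev)
	{
		struct qcauart *qca = serdev_device_get_drvdata(serdev);

		schedule_work(&qca->tx_work);
	}

	static const struct serdev_device_ops qca_serdev_ops = {
		.receive_buf = qca_tty_receive,
		.write_wakeup = qca_tty_wakeup,
	};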
176 struct qcauart *qca = netdev_priv(dev);
178 netif_start_queue(qca->net_dev);
185 struct qcauart *qca = netdev_priv(dev);
188 flush_work(&qca->tx_work);
190 spin_lock_bh(&qca->lock);
191 qca->tx_left = 0;
192 spin_unlock_bh(&qca->lock);
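File lines 176-192 are the ndo_open/ndo_stop handlers: open only starts the TX queue, while close stops it, flushes the TX worker, and discards any half-written frame by zeroing tx_left under the lock. A sketch of both:

	static int qcauart_netdev_open(struct net_device *dev)
	{
		struct qcauart *qca = netdev_priv(dev);

		netif_start_queue(qca->net_dev);

		return 0;
	}

	static int qcauart_netdev_close(struct net_device *dev)
	{
		struct qcauart *qca = netdev_priv(dev);

		netif_stop_queue(dev);
		flush_work(&qca->tx_work);

		/* Drop whatever was left of the frame being transmitted. */
		spin_lock_bh(&qca->lock);
		qca->tx_left = 0;
		spin_unlock_bh(&qca->lock);

		return 0;
	}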
201 struct qcauart *qca = netdev_priv(dev);
206 spin_lock(&qca->lock);
208 WARN_ON(qca->tx_left);
211 spin_unlock(&qca->lock);
212 netdev_warn(qca->net_dev, "xmit: iface is down\n");
216 pos = qca->tx_buffer;
233 netif_stop_queue(qca->net_dev);
235 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
236 pos - qca->tx_buffer);
238 qca->tx_left = (pos - qca->tx_buffer) - written;
239 qca->tx_head = qca->tx_buffer + written;
242 spin_unlock(&qca->lock);
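File lines 201-242 belong to the ndo_start_xmit handler. The frame is serialized into qca->tx_buffer (QCA7000 header, payload, zero padding up to the minimum frame length, footer), the queue is stopped, and as many bytes as the serdev accepts are written immediately; anything left over is recorded in tx_left/tx_head for the worker to finish. A condensed sketch, assuming the QCAFRM_* constants and qcafrm_create_header/qcafrm_create_footer from qca_7k_common, and leaving out the skb_copy_expand slow path of the real driver:

	static netdev_tx_t
	qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct qcauart *qca = netdev_priv(dev);
		struct net_device_stats *n_stats = &dev->stats;
		u8 pad_len = 0;
		int written;
		u8 *pos;

		spin_lock(&qca->lock);

		/* The queue stays stopped while a frame is pending, so no bytes
		 * from a previous frame may be left at this point.
		 */
		WARN_ON(qca->tx_left);

		if (!netif_running(dev)) {
			spin_unlock(&qca->lock);
			netdev_warn(qca->net_dev, "xmit: iface is down\n");
			goto out;
		}

		pos = qca->tx_buffer;

		/* Short Ethernet frames are padded up to the QCA7000 minimum. */
		if (skb->len < QCAFRM_MIN_LEN)
			pad_len = QCAFRM_MIN_LEN - skb->len;

		pos += qcafrm_create_header(pos, skb->len + pad_len);

		memcpy(pos, skb->data, skb->len);
		pos += skb->len;

		if (pad_len) {
			memset(pos, 0, pad_len);
			pos += pad_len;
		}

		pos += qcafrm_create_footer(pos);

		netif_stop_queue(qca->net_dev);

		written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
						  pos - qca->tx_buffer);
		if (written > 0) {
			qca->tx_left = (pos - qca->tx_buffer) - written;
			qca->tx_head = qca->tx_buffer + written;
			n_stats->tx_bytes += written;
		}
		spin_unlock(&qca->lock);

		netif_trans_update(dev);
	out:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}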
252 struct qcauart *qca = netdev_priv(dev);
254 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
262 struct qcauart *qca = netdev_priv(dev);
270 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
271 if (!qca->tx_buffer)
274 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
275 qca->net_dev->mtu +
277 if (!qca->rx_skb)
285 struct qcauart *qca = netdev_priv(dev);
287 dev_kfree_skb(qca->rx_skb);
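File lines 262-287 are the ndo_init/ndo_uninit pair: init sizes a transmit buffer large enough for one maximum-length framed packet and preallocates the first receive skb, uninit frees that skb. A sketch, assuming the QCAFRM_* length constants from qca_7k_common:

	static int qcauart_netdev_init(struct net_device *dev)
	{
		struct qcauart *qca = netdev_priv(dev);
		size_t len;

		/* Room for one maximum-size frame plus the QCA7000 header/footer. */
		len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
		qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
		if (!qca->tx_buffer)
			return -ENOMEM;

		qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
							qca->net_dev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb)
			return -ENOBUFS;

		return 0;
	}

	static void qcauart_netdev_uninit(struct net_device *dev)
	{
		struct qcauart *qca = netdev_priv(dev);

		dev_kfree_skb(qca->rx_skb);
	}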
315 .compatible = "qca,qca7000",
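File line 315 is the compatible string from the driver's device-tree match table; the surrounding table typically looks like this (the table name is an assumption):

	static const struct of_device_id qca_uart_of_match[] = {
		{ .compatible = "qca,qca7000" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, qca_uart_of_match);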
324 struct qcauart *qca;
334 qca = netdev_priv(qcauart_dev);
335 if (!qca) {
340 qca->net_dev = qcauart_dev;
341 qca->serdev = serdev;
342 qcafrm_fsm_init_uart(&qca->frm_handle);
344 spin_lock_init(&qca->lock);
345 INIT_WORK(&qca->tx_work, qcauart_transmit);
349 ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
351 eth_hw_addr_random(qca->net_dev);
353 qca->net_dev->dev_addr);
356 netif_carrier_on(qca->net_dev);
357 serdev_device_set_drvdata(serdev, qca);
377 cancel_work_sync(&qca->tx_work);
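File lines 324-377 are from the serdev probe path: an etherdev is allocated, the private struct is wired up (net_dev and serdev back-pointers, framing FSM, lock, TX work), the MAC address is read from the device tree or randomized, and the serdev is opened and configured before the netdev is registered; cancel_work_sync at line 377 sits in the error path. A condensed sketch of that sequence, assuming the helper names qcauart_netdev_setup and qca_serdev_ops and a "current-speed" DT property:

	static int qca_uart_probe(struct serdev_device *serdev)
	{
		struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
		struct qcauart *qca;
		u32 speed = 115200;
		int ret;

		if (!qcauart_dev)
			return -ENOMEM;

		qcauart_netdev_setup(qcauart_dev);
		SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

		qca = netdev_priv(qcauart_dev);
		if (!qca) {
			ret = -ENOMEM;
			goto free;
		}
		qca->net_dev = qcauart_dev;
		qca->serdev = serdev;
		qcafrm_fsm_init_uart(&qca->frm_handle);

		spin_lock_init(&qca->lock);
		INIT_WORK(&qca->tx_work, qcauart_transmit);

		of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

		/* Prefer the MAC address from the device tree, else randomize. */
		ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
		if (ret) {
			eth_hw_addr_random(qca->net_dev);
			dev_info(&serdev->dev, "Using random MAC address: %pM\n",
				 qca->net_dev->dev_addr);
		}

		netif_carrier_on(qca->net_dev);
		serdev_device_set_drvdata(serdev, qca);
		serdev_device_set_client_ops(serdev, &qca_serdev_ops);

		ret = serdev_device_open(serdev);
		if (ret)
			goto free;

		serdev_device_set_baudrate(serdev, speed);
		serdev_device_set_flow_control(serdev, false);

		ret = register_netdev(qcauart_dev);
		if (ret) {
			serdev_device_close(serdev);
			cancel_work_sync(&qca->tx_work);
			goto free;
		}

		return 0;

	free:
		free_netdev(qcauart_dev);
		return ret;
	}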
390 struct qcauart *qca = serdev_device_get_drvdata(serdev);
392 unregister_netdev(qca->net_dev);
396 cancel_work_sync(&qca->tx_work);
398 free_netdev(qca->net_dev);
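File lines 390-398 are the remove path: unregister the netdev, close the serdev (stopping further callbacks), make sure the TX worker has finished, then free the netdev. A sketch:

	static void qca_uart_remove(struct serdev_device *serdev)
	{
		struct qcauart *qca = serdev_device_get_drvdata(serdev);

		unregister_netdev(qca->net_dev);

		/* Flush any pending characters in the serial driver. */
		serdev_device_close(serdev);
		cancel_work_sync(&qca->tx_work);

		free_netdev(qca->net_dev);
	}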