Lines Matching refs:qca (identifier cross-reference for "qca" in the Qualcomm QCA7000 UART/serdev Ethernet driver, qca_uart.c). Each entry gives the source line number followed by the matching line; continuation lines of multi-line statements do not match and are omitted.
66 struct qcauart *qca = serdev_device_get_drvdata(serdev);
67 struct net_device *netdev = qca->net_dev;
71 if (!qca->rx_skb) {
72 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
75 if (!qca->rx_skb) {
85 retcode = qcafrm_fsm_decode(&qca->frm_handle,
86 qca->rx_skb->data,
87 skb_tailroom(qca->rx_skb),
107 skb_put(qca->rx_skb, retcode);
108 qca->rx_skb->protocol = eth_type_trans(
109 qca->rx_skb, qca->rx_skb->dev);
110 skb_checksum_none_assert(qca->rx_skb);
111 netif_rx_ni(qca->rx_skb);
112 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
115 if (!qca->rx_skb) {
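The fragments above come from the serdev receive callback. For context, a condensed sketch of that function (qca_tty_receive() in mainline qca_uart.c; the usual includes of that file, such as linux/netdevice.h, linux/if_vlan.h, linux/serdev.h and qca_framing.h, are assumed). The framing-error switch is simplified to a single "complete frame" check here, and details vary by kernel version; newer kernels use netif_rx() instead of netif_rx_ni():

static int qca_tty_receive(struct serdev_device *serdev,
			   const unsigned char *data, size_t count)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);
	struct net_device *netdev = qca->net_dev;
	struct net_device_stats *n_stats = &netdev->stats;
	size_t i;

	/* Lazily allocate an IP-aligned RX skb sized for the MTU plus
	 * Ethernet/VLAN headers.
	 */
	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
							netdev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			return 0;
		}
	}

	/* Feed the QCA framing state machine one byte at a time. */
	for (i = 0; i < count; i++) {
		s32 retcode;

		retcode = qcafrm_fsm_decode(&qca->frm_handle,
					    qca->rx_skb->data,
					    skb_tailroom(qca->rx_skb),
					    data[i]);

		if (retcode > 0) {
			/* A complete frame was decoded: hand it to the
			 * stack and re-arm with a fresh skb.
			 */
			n_stats->rx_packets++;
			n_stats->rx_bytes += retcode;
			skb_put(qca->rx_skb, retcode);
			qca->rx_skb->protocol = eth_type_trans(
					qca->rx_skb, qca->rx_skb->dev);
			skb_checksum_none_assert(qca->rx_skb);
			netif_rx_ni(qca->rx_skb);
			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
								netdev->mtu +
								VLAN_ETH_HLEN);
			if (!qca->rx_skb) {
				n_stats->rx_errors++;
				return i;
			}
		}
		/* Negative codes (QCAFRM_GATHER, QCAFRM_NOHEAD, ...) are
		 * mid-frame states or framing errors; the real driver
		 * handles them in a switch and bumps rx_errors/rx_dropped.
		 */
	}

	return i;
}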
129 struct qcauart *qca = container_of(work, struct qcauart, tx_work);
130 struct net_device_stats *n_stats = &qca->net_dev->stats;
133 spin_lock_bh(&qca->lock);
136 if (!netif_running(qca->net_dev)) {
137 spin_unlock_bh(&qca->lock);
141 if (qca->tx_left <= 0) {
146 spin_unlock_bh(&qca->lock);
147 netif_wake_queue(qca->net_dev);
151 written = serdev_device_write_buf(qca->serdev, qca->tx_head,
152 qca->tx_left);
154 qca->tx_left -= written;
155 qca->tx_head += written;
157 spin_unlock_bh(&qca->lock);
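Stitched together, the lines above form the TX worker, which drains whatever serdev could not accept in a single write. A condensed sketch (qcauart_transmit() in mainline):

static void qcauart_transmit(struct work_struct *work)
{
	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	int written;

	spin_lock_bh(&qca->lock);

	/* Nothing to do if the interface went down in the meantime. */
	if (!netif_running(qca->net_dev)) {
		spin_unlock_bh(&qca->lock);
		return;
	}

	if (qca->tx_left <= 0) {
		/* All bytes written: account the packet and let the
		 * stack queue the next one.
		 */
		n_stats->tx_packets++;
		spin_unlock_bh(&qca->lock);
		netif_wake_queue(qca->net_dev);
		return;
	}

	/* serdev may accept only part of the buffer; advance the head
	 * and wait for the next write-wakeup.
	 */
	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
					  qca->tx_left);
	if (written > 0) {
		qca->tx_left -= written;
		qca->tx_head += written;
	}
	spin_unlock_bh(&qca->lock);
}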
165 struct qcauart *qca = serdev_device_get_drvdata(serdev);
167 schedule_work(&qca->tx_work);
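The two lines above are essentially the whole body of the serdev write-wakeup callback, which just reschedules that worker (qca_tty_wakeup() in mainline):

static void qca_tty_wakeup(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	schedule_work(&qca->tx_work);
}

Both callbacks are wired up through a struct serdev_device_ops (.receive_buf and .write_wakeup) that probe registers with serdev_device_set_client_ops().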
177 struct qcauart *qca = netdev_priv(dev);
179 netif_start_queue(qca->net_dev);
186 struct qcauart *qca = netdev_priv(dev);
189 flush_work(&qca->tx_work);
191 spin_lock_bh(&qca->lock);
192 qca->tx_left = 0;
193 spin_unlock_bh(&qca->lock);
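Open and close are correspondingly small; close also flushes the worker and discards any half-written frame. A sketch of both (qcauart_netdev_open()/_close() in mainline):

static int qcauart_netdev_open(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_start_queue(qca->net_dev);

	return 0;
}

static int qcauart_netdev_close(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_stop_queue(dev);
	flush_work(&qca->tx_work);

	/* Drop any partially transmitted frame. */
	spin_lock_bh(&qca->lock);
	qca->tx_left = 0;
	spin_unlock_bh(&qca->lock);

	return 0;
}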
202 struct qcauart *qca = netdev_priv(dev);
207 spin_lock(&qca->lock);
209 WARN_ON(qca->tx_left);
212 spin_unlock(&qca->lock);
213 netdev_warn(qca->net_dev, "xmit: iface is down\n");
217 pos = qca->tx_buffer;
234 netif_stop_queue(qca->net_dev);
236 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
237 pos - qca->tx_buffer);
239 qca->tx_left = (pos - qca->tx_buffer) - written;
240 qca->tx_head = qca->tx_buffer + written;
243 spin_unlock(&qca->lock);
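These fragments belong to the ndo_start_xmit hook: the skb is framed into qca->tx_buffer (header, payload, zero padding up to the minimum frame length, footer), the queue is stopped, and as much as possible is pushed to serdev; the worker above finishes the rest. A condensed sketch (qcauart_netdev_xmit() in mainline; QCAFRM_MIN_LEN and the qcafrm_create_*() helpers come from the driver's qca_framing code):

static netdev_tx_t
qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *n_stats = &dev->stats;
	struct qcauart *qca = netdev_priv(dev);
	u8 pad_len = 0;
	int written;
	u8 *pos;

	spin_lock(&qca->lock);

	/* The queue stays stopped until the previous frame has drained. */
	WARN_ON(qca->tx_left);

	if (!netif_running(dev)) {
		spin_unlock(&qca->lock);
		netdev_warn(qca->net_dev, "xmit: iface is down\n");
		goto out;
	}

	pos = qca->tx_buffer;

	/* Pad short frames up to the minimum QCA frame length. */
	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	pos += qcafrm_create_header(pos, skb->len + pad_len);

	memcpy(pos, skb->data, skb->len);
	pos += skb->len;

	if (pad_len) {
		memset(pos, 0, pad_len);
		pos += pad_len;
	}

	pos += qcafrm_create_footer(pos);

	netif_stop_queue(qca->net_dev);

	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
					  pos - qca->tx_buffer);
	if (written > 0) {
		qca->tx_left = (pos - qca->tx_buffer) - written;
		qca->tx_head = qca->tx_buffer + written;
		n_stats->tx_bytes += written;
	}
	spin_unlock(&qca->lock);

	netif_trans_update(dev);
out:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}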
253 struct qcauart *qca = netdev_priv(dev);
255 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
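The timeout handler only logs the stall and bumps the error counters. A sketch (qcauart_netdev_tx_timeout() in mainline; kernels from v5.6 on also take a txqueue argument):

static void qcauart_netdev_tx_timeout(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
		    jiffies, jiffies - dev_trans_start(dev));
	dev->stats.tx_errors++;
	dev->stats.tx_dropped++;
}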
263 struct qcauart *qca = netdev_priv(dev);
271 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
272 if (!qca->tx_buffer)
275 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
276 qca->net_dev->mtu +
278 if (!qca->rx_skb)
286 struct qcauart *qca = netdev_priv(dev);
288 dev_kfree_skb(qca->rx_skb);
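ndo_init allocates the per-device TX buffer and the first RX skb once the MTU is known; ndo_uninit frees the skb (the TX buffer is devm-managed, so it needs no explicit free). Sketch of both (qcauart_netdev_init()/_uninit(); the QCAFRM_* length constants come from qca_framing.h):

static int qcauart_netdev_init(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);
	size_t len;

	/* The TX buffer must hold a worst-case frame:
	 * framing header + maximum frame + framing footer.
	 */
	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
	if (!qca->tx_buffer)
		return -ENOMEM;

	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
						qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb)
		return -ENOBUFS;

	return 0;
}

static void qcauart_netdev_uninit(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	dev_kfree_skb(qca->rx_skb);
}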
316 .compatible = "qca,qca7000",
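The .compatible fragment above is the driver's sole OF match entry; the surrounding table (named qca_uart_of_match in mainline) is:

static const struct of_device_id qca_uart_of_match[] = {
	{ .compatible = "qca,qca7000" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_uart_of_match);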
325 struct qcauart *qca;
336 qca = netdev_priv(qcauart_dev);
337 if (!qca) {
342 qca->net_dev = qcauart_dev;
343 qca->serdev = serdev;
344 qcafrm_fsm_init_uart(&qca->frm_handle);
346 spin_lock_init(&qca->lock);
347 INIT_WORK(&qca->tx_work, qcauart_transmit);
354 ether_addr_copy(qca->net_dev->dev_addr, mac);
356 if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
357 eth_hw_addr_random(qca->net_dev);
359 qca->net_dev->dev_addr);
362 netif_carrier_on(qca->net_dev);
363 serdev_device_set_drvdata(serdev, qca);
383 cancel_work_sync(&qca->tx_work);
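Probe allocates the etherdev, initializes the private struct, the framing FSM, the lock and the TX worker, picks up the MAC address from DT (falling back to a random one), then opens the serdev, sets the baudrate and registers the netdev. On registration failure it closes the serdev and cancels the worker (line 383 above) before freeing. A condensed sketch (qca_uart_probe() in mainline; the DT MAC helpers changed across kernel versions, so that part is abbreviated to a comment):

static int qca_uart_probe(struct serdev_device *serdev)
{
	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
	struct qcauart *qca;
	u32 speed = 115200;
	int ret;

	if (!qcauart_dev)
		return -ENOMEM;

	qcauart_netdev_setup(qcauart_dev);	/* ndo ops, MTU, flags */
	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

	qca = netdev_priv(qcauart_dev);
	qca->net_dev = qcauart_dev;
	qca->serdev = serdev;
	qcafrm_fsm_init_uart(&qca->frm_handle);

	spin_lock_init(&qca->lock);
	INIT_WORK(&qca->tx_work, qcauart_transmit);

	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

	/* MAC from DT if present and valid (of_get_mac_address() +
	 * ether_addr_copy() in this era), otherwise a random one.
	 */
	if (!is_valid_ether_addr(qca->net_dev->dev_addr))
		eth_hw_addr_random(qca->net_dev);

	netif_carrier_on(qca->net_dev);
	serdev_device_set_drvdata(serdev, qca);
	serdev_device_set_client_ops(serdev, &qca_serdev_ops);

	ret = serdev_device_open(serdev);
	if (ret)
		goto free;

	speed = serdev_device_set_baudrate(serdev, speed);
	serdev_device_set_flow_control(serdev, false);

	ret = register_netdev(qcauart_dev);
	if (ret) {
		serdev_device_close(serdev);
		cancel_work_sync(&qca->tx_work);
		goto free;
	}

	return 0;

free:
	free_netdev(qcauart_dev);
	return ret;
}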
396 struct qcauart *qca = serdev_device_get_drvdata(serdev);
398 unregister_netdev(qca->net_dev);
402 cancel_work_sync(&qca->tx_work);
404 free_netdev(qca->net_dev);
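Remove tears everything down in reverse order: unregister the netdev, close the serdev (which quiesces the receive/wakeup callbacks), cancel the worker, then free the netdev. A sketch (qca_uart_remove() in mainline):

static void qca_uart_remove(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	unregister_netdev(qca->net_dev);

	/* Flush any pending characters in the driver. */
	serdev_device_close(serdev);
	cancel_work_sync(&qca->tx_work);

	free_netdev(qca->net_dev);
}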