// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
 */

#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"

/******************************************************************************
    Network Interface functions
******************************************************************************/
void
islpci_eth_cleanup_transmit(islpci_private *priv,
			    isl38xx_control_block *control_block)
{
	struct sk_buff *skb;
	u32 index;

	/* compare the control block read pointer with the free pointer */
	while (priv->free_data_tx !=
	       le32_to_cpu(control_block->
			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
		/* read the index of the first fragment to be freed */
		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

		/* check for holes in the arrays caused by multi-fragment
		 * frames; only the last fragment of a frame has an entry */
		if (priv->pci_map_tx_address[index]) {
			/* entry is the last fragment of a frame
			 * free the skb structure and unmap pci memory */
			skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n",
			      skb, skb->data, skb->len, skb->truesize);
#endif

			dma_unmap_single(&priv->pdev->dev,
					 priv->pci_map_tx_address[index],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		}
		/* increment the free data low queue pointer */
		priv->free_data_tx++;
	}
}

netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
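	/* both counters are free-running u32 values, so the difference below
	 * is wrap-safe and equals the number of fragments still in flight */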
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
	/* Check alignment and WDS frame formatting. The start of the packet
	 * should be aligned on a 4-byte boundary. If WDS is enabled, reserve
	 * another 6 bytes and insert the WDS address information */
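	/* note: the bitwise OR below is intentional; the branch is taken when
	 * the buffer is misaligned or when WDS framing has to be added */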
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;
	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to the device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	/* The card reports full 802.11 packets but with a 20-byte
	 * header and without the FCS. But there is a bit that
	 * indicates if the packet is corrupted :-) */
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	if (hdr->flags & 0x01)
		/* This one is bad. Drop it ! */
		return -1;
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;
		/* extract the relevant data from the header */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			struct sk_buff *newskb = skb_copy_expand(*skb,
								 sizeof (struct
									 avs_80211_1_header),
								 0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;
			/* This behavior is not very subtle... */
		}

		/* make room for the new header and fill it. */
		avs = skb_push(*skb, sizeof(struct avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/* OFDM: 6 for (g), 8 for (a) */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);
		avs->antenna = cpu_to_be32(0);	/* unknown */
		avs->priority = cpu_to_be32(0);	/* unknown */
		avs->ssi_type = cpu_to_be32(3);	/* 2: dBm, 3: raw RSSI */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);	/* better than 'undefined', I assume */
		avs->preamble = cpu_to_be32(0);	/* unknown */
		avs->encoding = cpu_to_be32(0);	/* unknown */
	} else
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}

int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame into the data area
	 * of the sk_buff without updating the structure; do that now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n",
	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);

	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* the frame starts two bytes into the buffer; skip the
		 * leading pad bytes so skb->data points at the frame */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled and whether the data frame is a WDS frame */

	if (init_wds) {
		/* WDS enabled: strip the 6-byte WDS address from the start of the buffer */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif
	/* take care of monitor mode and spy monitoring. */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* The packet has an rx_annex. Read it for spy
			 * monitoring, then remove it while keeping the two
			 * leading MAC addresses.
			 */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* The noise value can be a bit outdated if nobody's
			 * reading wireless stats... */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;
			/* Update spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;

	/* add one or more sk_buff structures */
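	/* the comma expression below re-reads driver_curr_frag on every pass;
	 * keep allocating fresh receive buffers until the driver pointer is a
	 * full queue ahead of the free pointer */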
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frame storage,
		 * including any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* failed to allocate an sk_buff structure */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    dma_map_single(&priv->pdev->dev, (void *)skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device accessible memory address */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the sk_buff before aborting */
			dev_kfree_skb_irq(skb);
			skb = NULL;
			break;
		}
		/* update the fragment address */
		control_block->rx_data_low[index].address =
			cpu_to_le32((u32)priv->pci_map_rx_address[index]);
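		/* make sure the device sees the new buffer address before the
		 * driver pointer is advanced */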
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}

void
islpci_do_reset_and_wake(struct work_struct *work)
{
	islpci_private *priv = container_of(work, islpci_private, reset_task);

	islpci_reset(priv, 1);
	priv->reset_task_pending = 0;
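	/* order the flag clear before waking the queue; the tx_timeout path
	 * checks reset_task_pending before scheduling another reset */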
	smp_wmb();
	netif_wake_queue(priv->ndev);
}

void
islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	islpci_private *priv = netdev_priv(ndev);

	/* increment the transmit error counter */
	ndev->stats.tx_errors++;

	if (!priv->reset_task_pending) {
		printk(KERN_WARNING
			"%s: tx_timeout, scheduling reset\n", ndev->name);
		netif_stop_queue(ndev);
		priv->reset_task_pending = 1;
		schedule_work(&priv->reset_task);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout, waiting for reset\n", ndev->name);
	}
}