// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/regmap.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>
#include "stmmac.h"
#include "dwmac1000.h"
#include "dwmac_dma.h"
#include "dwmac-rk-tool.h"

enum {
	LOOPBACK_TYPE_GMAC = 1,
	LOOPBACK_TYPE_PHY
};

enum {
	LOOPBACK_SPEED10 = 10,
	LOOPBACK_SPEED100 = 100,
	LOOPBACK_SPEED1000 = 1000
};

struct dwmac_rk_packet_attrs {
	unsigned char src[6];
	unsigned char dst[6];
	u32 ip_src;
	u32 ip_dst;
	int tcp;
	int sport;
	int dport;
	int size;
};

struct dwmac_rk_hdr {
	__be32 version;
	__be64 magic;
	u32 id;
	int tx;
	int rx;
} __packed;

struct dwmac_rk_lb_priv {
	/* desc && buffer */
	struct dma_desc *dma_tx;
	dma_addr_t dma_tx_phy;
	struct sk_buff *tx_skbuff;
	dma_addr_t tx_skbuff_dma;
	unsigned int tx_skbuff_dma_len;

	struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
	dma_addr_t dma_rx_phy;
	struct sk_buff *rx_skbuff;
	dma_addr_t rx_skbuff_dma;
	u32 rx_tail_addr;
	u32 tx_tail_addr;

	/* rx buffer size */
	unsigned int dma_buf_sz;
	unsigned int buf_sz;

	int type;
	int speed;
	struct dwmac_rk_packet_attrs *packet;

	unsigned int actual_size;
	int scan;
	int sysfs;
	u32 id;
	int tx;
	int rx;
	int final_tx;
	int final_rx;
	int max_delay;
};

#define DMA_CONTROL_OSP		BIT(4)
#define DMA_CHAN_BASE_ADDR	0x00001100
#define DMA_CHAN_BASE_OFFSET	0x80
#define DMA_CHANX_BASE_ADDR(x)	(DMA_CHAN_BASE_ADDR + \
				((x) * DMA_CHAN_BASE_OFFSET))
#define DMA_CHAN_TX_CONTROL(x)	(DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x60)
#define DMA_CHAN_STATUS_ERI	BIT(11)
#define DMA_CHAN_STATUS_ETI	BIT(10)

#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define MAX_DELAYLINE 0x7f
#define RK3588_MAX_DELAYLINE 0xc7
#define SCAN_STEP 0x5
#define SCAN_VALID_RANGE 0xA
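
/*
 * The delay-line scans below walk the tx/rx delay space in steps of
 * SCAN_STEP units and only trust a window of passing values if it is wider
 * than SCAN_VALID_RANGE (0xA = 10 units, i.e. two scan steps); the midpoint
 * of such a window is then taken as the calibrated delay.
 */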

#define DWMAC_RK_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
				sizeof(struct dwmac_rk_hdr))
#define DWMAC_RK_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
#define DWMAC_RK_TEST_PKT_MAX_SIZE 1500
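
/*
 * Loopback test frames are laid out as:
 *   ethhdr | iphdr | tcphdr-or-udphdr | dwmac_rk_hdr | attr->size random bytes
 * dwmac_rk_hdr carries the magic word plus the id and the tx/rx delay values
 * under test, so a received frame can be matched against the exact transmit
 * attempt that produced it.
 */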

static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_udp_attr = {
	.dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	.tcp = 0,
	.size = 1024,
};

static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_tcp_attr = {
	.dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	.tcp = 1,
	.size = 1024,
};

static int dwmac_rk_enable_mac_loopback(struct stmmac_priv *priv, int speed,
					int addr, bool phy)
{
	u32 ctrl;
	int phy_val;

	ctrl = readl(priv->ioaddr + GMAC_CONTROL);
	ctrl &= ~priv->hw->link.speed_mask;
	ctrl |= GMAC_CONTROL_LM;

	if (phy)
		phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);

	switch (speed) {
	case LOOPBACK_SPEED1000:
		ctrl |= priv->hw->link.speed1000;
		if (phy) {
			phy_val &= ~BMCR_SPEED100;
			phy_val |= BMCR_SPEED1000;
		}
		break;
	case LOOPBACK_SPEED100:
		ctrl |= priv->hw->link.speed100;
		if (phy) {
			phy_val &= ~BMCR_SPEED1000;
			phy_val |= BMCR_SPEED100;
		}
		break;
	case LOOPBACK_SPEED10:
		ctrl |= priv->hw->link.speed10;
		if (phy) {
			phy_val &= ~BMCR_SPEED1000;
			phy_val &= ~BMCR_SPEED100;
		}
		break;
	default:
		return -EPERM;
	}

	ctrl |= priv->hw->link.duplex;
	writel(ctrl, priv->ioaddr + GMAC_CONTROL);

	if (phy) {
		phy_val &= ~BMCR_PDOWN;
		phy_val &= ~BMCR_ANENABLE;
		phy_val |= BMCR_FULLDPLX;
		mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
		phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
	}

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	return 0;
}

static int dwmac_rk_disable_mac_loopback(struct stmmac_priv *priv, int addr)
{
	u32 ctrl;
	int phy_val;

	ctrl = readl(priv->ioaddr + GMAC_CONTROL);
	ctrl &= ~GMAC_CONTROL_LM;
	writel(ctrl, priv->ioaddr + GMAC_CONTROL);

	phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
	phy_val |= BMCR_ANENABLE;

	mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
	phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);

	return 0;
}

static int dwmac_rk_set_mac_loopback(struct stmmac_priv *priv,
				     int speed, bool enable,
				     int addr, bool phy)
{
	if (enable)
		return dwmac_rk_enable_mac_loopback(priv, speed, addr, phy);
	else
		return dwmac_rk_disable_mac_loopback(priv, addr);
}

static int dwmac_rk_enable_phy_loopback(struct stmmac_priv *priv, int speed,
					int addr, bool phy)
{
	u32 ctrl;
	int val;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (phy)
		val = mdiobus_read(priv->mii, addr, MII_BMCR);

	switch (speed) {
	case LOOPBACK_SPEED1000:
		ctrl |= priv->hw->link.speed1000;
		if (phy) {
			val &= ~BMCR_SPEED100;
			val |= BMCR_SPEED1000;
		}
		break;
	case LOOPBACK_SPEED100:
		ctrl |= priv->hw->link.speed100;
		if (phy) {
			val &= ~BMCR_SPEED1000;
			val |= BMCR_SPEED100;
		}
		break;
	case LOOPBACK_SPEED10:
		ctrl |= priv->hw->link.speed10;
		if (phy) {
			val &= ~BMCR_SPEED1000;
			val &= ~BMCR_SPEED100;
		}
		break;
	default:
		return -EPERM;
	}

	ctrl |= priv->hw->link.duplex;
	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	if (phy) {
		val |= BMCR_FULLDPLX;
		val &= ~BMCR_PDOWN;
		val &= ~BMCR_ANENABLE;
		val |= BMCR_LOOPBACK;
		mdiobus_write(priv->mii, addr, MII_BMCR, val);
		val = mdiobus_read(priv->mii, addr, MII_BMCR);
	}

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	return 0;
}

static int dwmac_rk_disable_phy_loopback(struct stmmac_priv *priv, int addr)
{
	int val;

	val = mdiobus_read(priv->mii, addr, MII_BMCR);
	val |= BMCR_ANENABLE;
	val &= ~BMCR_LOOPBACK;

	mdiobus_write(priv->mii, addr, MII_BMCR, val);
	val = mdiobus_read(priv->mii, addr, MII_BMCR);

	return 0;
}

static int dwmac_rk_set_phy_loopback(struct stmmac_priv *priv,
				     int speed, bool enable,
				     int addr, bool phy)
{
	if (enable)
		return dwmac_rk_enable_phy_loopback(priv, speed,
						    addr, phy);
	else
		return dwmac_rk_disable_phy_loopback(priv, addr);
}

static int dwmac_rk_set_loopback(struct stmmac_priv *priv,
				 int type, int speed, bool enable,
				 int addr, bool phy)
{
	int ret;

	switch (type) {
	case LOOPBACK_TYPE_PHY:
		ret = dwmac_rk_set_phy_loopback(priv, speed, enable, addr, phy);
		break;
	case LOOPBACK_TYPE_GMAC:
		ret = dwmac_rk_set_mac_loopback(priv, speed, enable, addr, phy);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	usleep_range(100000, 200000);
	return ret;
}

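/*
 * Copy a MAC address as three 16-bit loads/stores: both the struct ethhdr
 * address fields and the attribute buffers used here sit at even offsets,
 * so 2-byte accesses are safe. This mirrors the kernel's ether_addr_copy()
 * helper.
 */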
static inline void dwmac_rk_ether_addr_copy(u8 *dst, const u8 *src)
{
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
}

static void dwmac_rk_udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	uh->check = ~csum_tcpudp_magic(src, dst, len,
				       IPPROTO_UDP, 0);
}

static struct sk_buff *dwmac_rk_get_skb(struct stmmac_priv *priv,
					struct dwmac_rk_lb_priv *lb_priv)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct dwmac_rk_hdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	struct dwmac_rk_packet_attrs *attr;
	int iplen, size, nfrags;

	attr = lb_priv->packet;
	size = attr->size + DWMAC_RK_TEST_PKT_SIZE;
	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (size >= DWMAC_RK_TEST_PKT_MAX_SIZE)
		return NULL;

	lb_priv->actual_size = size;

	skb = netdev_alloc_skb_ip_align(priv->dev, size);
	if (!skb)
		return NULL;

	skb_linearize(skb);
	nfrags = skb_shinfo(skb)->nr_frags;
	if (nfrags > 0) {
		pr_err("%s: TX nfrags is not zero\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}

	ehdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = (struct iphdr *)skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = (struct tcphdr *)skb_put(skb, sizeof(*thdr));
	else
		uhdr = (struct udphdr *)skb_put(skb, sizeof(*uhdr));

	eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);

	dwmac_rk_ether_addr_copy(ehdr->h_source, priv->dev->dev_addr);
	dwmac_rk_ether_addr_copy(ehdr->h_dest, attr->dst);

	ehdr->h_proto = htons(ETH_P_IP);

	if (attr->tcp) {
		if (!thdr) {
			dev_kfree_skb(skb);
			return NULL;
		}

		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		if (!uhdr) {
			dev_kfree_skb(skb);
			return NULL;
		}

		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;

	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	shdr = (struct dwmac_rk_hdr *)skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC);
	shdr->id = lb_priv->id;
	shdr->tx = lb_priv->tx;
	shdr->rx = lb_priv->rx;

	if (attr->size) {
		skb_put(skb, attr->size);
		get_random_bytes((u8 *)shdr + sizeof(*shdr), attr->size);
	}

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		if (!thdr) {
			dev_kfree_skb(skb);
			return NULL;
		}

		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr,
					    ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		dwmac_rk_udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;

	return skb;
}

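/*
 * Check a received frame against the attempt that generated it: length,
 * source/destination MAC, IP protocol, destination port, then the private
 * header's magic, id and tx/rx delay values. -EAGAIN means "not our frame
 * or a stale one"; 0 means the loopback round trip succeeded.
 */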
static int dwmac_rk_loopback_validate(struct stmmac_priv *priv,
				      struct dwmac_rk_lb_priv *lb_priv,
				      struct sk_buff *skb)
{
	struct dwmac_rk_hdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;
	int ret = -EAGAIN;

	if (skb->len >= DWMAC_RK_TEST_PKT_MAX_SIZE)
		goto out;

	if (lb_priv->actual_size != skb->len)
		goto out;

	ehdr = (struct ethhdr *)(skb->data);
	if (!ether_addr_equal(ehdr->h_dest, lb_priv->packet->dst))
		goto out;

	if (!ether_addr_equal(ehdr->h_source, priv->dev->dev_addr))
		goto out;

	ihdr = (struct iphdr *)(skb->data + ETH_HLEN);

	if (lb_priv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(lb_priv->packet->dport))
			goto out;

		shdr = (struct dwmac_rk_hdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(lb_priv->packet->dport))
			goto out;

		shdr = (struct dwmac_rk_hdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	if (shdr->magic != cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC))
		goto out;

	if (lb_priv->id != shdr->id)
		goto out;

	if (lb_priv->tx != shdr->tx || lb_priv->rx != shdr->rx)
		goto out;

	ret = 0;
out:
	return ret;
}

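/*
 * The loopback path uses a single descriptor per direction. Refilling the
 * Rx side means: allocate one skb, map it for DMA, point the lone Rx
 * descriptor at it, hand ownership to the DMA and bump the tail pointer.
 */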
static inline int dwmac_rk_rx_fill(struct stmmac_priv *priv,
				   struct dwmac_rk_lb_priv *lb_priv)
{
	struct dma_desc *p;
	struct sk_buff *skb;

	p = lb_priv->dma_rx;
	if (likely(!lb_priv->rx_skbuff)) {
		skb = netdev_alloc_skb_ip_align(priv->dev, lb_priv->buf_sz);
		if (unlikely(!skb))
			return -ENOMEM;

		if (skb_linearize(skb)) {
			pr_err("%s: Rx skb linearize failed\n", __func__);
			lb_priv->rx_skbuff = NULL;
			dev_kfree_skb(skb);
			return -EPERM;
		}

		lb_priv->rx_skbuff = skb;
		lb_priv->rx_skbuff_dma =
		    dma_map_single(priv->device, skb->data, lb_priv->dma_buf_sz,
				   DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->device,
				      lb_priv->rx_skbuff_dma)) {
			pr_err("%s: Rx dma map failed\n", __func__);
			lb_priv->rx_skbuff = NULL;
			dev_kfree_skb(skb);
			return -EFAULT;
		}

		stmmac_set_desc_addr(priv, p, lb_priv->rx_skbuff_dma);
		/* Fill DES3 in case of RING mode */
		if (lb_priv->dma_buf_sz == BUF_SIZE_16KiB)
			p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
	}

	wmb();
	stmmac_set_rx_owner(priv, p, priv->use_riwt);
	wmb();

	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, lb_priv->rx_tail_addr, 0);

	return 0;
}

static void dwmac_rk_rx_clean(struct stmmac_priv *priv,
			      struct dwmac_rk_lb_priv *lb_priv)
{
	struct sk_buff *skb;

	skb = lb_priv->rx_skbuff;

	if (likely(lb_priv->rx_skbuff)) {
		dma_unmap_single(priv->device,
				 lb_priv->rx_skbuff_dma,
				 lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		lb_priv->rx_skbuff = NULL;
	}
}

static int dwmac_rk_rx_validate(struct stmmac_priv *priv,
				struct dwmac_rk_lb_priv *lb_priv)
{
	struct dma_desc *p;
	struct sk_buff *skb;
	int coe = priv->hw->rx_csum;
	unsigned int frame_len;
	int ret;

	p = lb_priv->dma_rx;
	skb = lb_priv->rx_skbuff;
	if (unlikely(!skb)) {
		pr_err("%s: Inconsistent Rx descriptor chain\n",
		       __func__);
		return -EINVAL;
	}

	frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
	/* check if frame_len fits the preallocated memory */
	if (frame_len > lb_priv->dma_buf_sz) {
		pr_err("%s: frame_len too long: %d\n", __func__, frame_len);
		return -ENOMEM;
	}

	frame_len -= ETH_FCS_LEN;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, frame_len);
	dma_unmap_single(priv->device,
			 lb_priv->rx_skbuff_dma,
			 lb_priv->dma_buf_sz,
			 DMA_FROM_DEVICE);

	ret = dwmac_rk_loopback_validate(priv, lb_priv, skb);
	dwmac_rk_rx_clean(priv, lb_priv);
	dwmac_rk_rx_fill(priv, lb_priv);

	return ret;
}

static int dwmac_rk_get_desc_status(struct stmmac_priv *priv,
				    struct dwmac_rk_lb_priv *lb_priv)
{
	struct dma_desc *txp, *rxp;
	int tx_status, rx_status;

	txp = lb_priv->dma_tx;
	tx_status = priv->hw->desc->tx_status(&priv->dev->stats,
					      &priv->xstats, txp,
					      priv->ioaddr);
	/* Check if the descriptor is owned by the DMA */
	if (unlikely(tx_status & tx_dma_own))
		return -EBUSY;

	rxp = lb_priv->dma_rx;
	/* read the status of the incoming frame */
	rx_status = priv->hw->desc->rx_status(&priv->dev->stats,
					      &priv->xstats, rxp);
	if (unlikely(rx_status & dma_own))
		return -EBUSY;

	usleep_range(100, 150);

	return 0;
}

static void dwmac_rk_tx_clean(struct stmmac_priv *priv,
			      struct dwmac_rk_lb_priv *lb_priv)
{
	struct sk_buff *skb;
	struct dma_desc *p;

	skb = lb_priv->tx_skbuff;
	p = lb_priv->dma_tx;

	if (likely(lb_priv->tx_skbuff_dma)) {
		dma_unmap_single(priv->device,
				 lb_priv->tx_skbuff_dma,
				 lb_priv->tx_skbuff_dma_len,
				 DMA_TO_DEVICE);
		lb_priv->tx_skbuff_dma = 0;
	}

	if (likely(skb)) {
		dev_kfree_skb(skb);
		lb_priv->tx_skbuff = NULL;
	}

	priv->hw->desc->release_tx_desc(p, priv->mode);
}

static int dwmac_rk_xmit(struct sk_buff *skb, struct net_device *dev,
			 struct dwmac_rk_lb_priv *lb_priv)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int csum_insertion = 0;
	struct dma_desc *desc;
	unsigned int des;

	priv->hw->mac->reset_eee_mode(priv->hw);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	desc = lb_priv->dma_tx;
	lb_priv->tx_skbuff = skb;

	des = dma_map_single(priv->device, skb->data,
			     nopaged_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	stmmac_set_desc_addr(priv, desc, des);
	/* record the mapping so dwmac_rk_tx_clean() can unmap it */
	lb_priv->tx_skbuff_dma = des;
	lb_priv->tx_skbuff_dma_len = nopaged_len;

	/* Prepare the first descriptor setting the OWN bit too */
	stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len,
			       csum_insertion, priv->mode, 1, 1,
			       skb->len);
	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	lb_priv->tx_tail_addr = lb_priv->dma_tx_phy + sizeof(*desc);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, lb_priv->tx_tail_addr, 0);

	return 0;

dma_map_err:
	pr_err("%s: Tx dma map failed\n", __func__);
	dev_kfree_skb(skb);
	return -EFAULT;
}

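/*
 * One complete loopback round trip: refill the Rx descriptor, enable the
 * MAC and the DMA channels, transmit a single test frame, then poll the
 * DMA status for the early-Rx (ERI) and early-Tx (ETI) interrupt bits
 * before validating the received frame. The poll budget scales with the
 * line rate, since a 10Mbps round trip takes longer than a 1000Mbps one.
 */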
static int __dwmac_rk_loopback_run(struct stmmac_priv *priv,
				   struct dwmac_rk_lb_priv *lb_priv)
{
	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
	struct sk_buff *tx_skb;
	u32 chan = 0;
	int ret = -EIO, delay;
	u32 status;
	bool finish = false;

	if (lb_priv->speed == LOOPBACK_SPEED1000)
		delay = 10;
	else if (lb_priv->speed == LOOPBACK_SPEED100)
		delay = 20;
	else if (lb_priv->speed == LOOPBACK_SPEED10)
		delay = 50;
	else
		return -EPERM;

	if (dwmac_rk_rx_fill(priv, lb_priv))
		return -ENOMEM;

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx(priv, priv->ioaddr, chan);
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx(priv, priv->ioaddr, chan);

	tx_skb = dwmac_rk_get_skb(priv, lb_priv);
	if (!tx_skb) {
		ret = -ENOMEM;
		goto stop;
	}

	if (dwmac_rk_xmit(tx_skb, priv->dev, lb_priv)) {
		ret = -EFAULT;
		goto stop;
	}

	do {
		usleep_range(100, 150);
		delay--;
		if (priv->plat->has_gmac4) {
			status = readl(priv->ioaddr + DMA_CHAN_STATUS(0));
			finish = (status & DMA_CHAN_STATUS_ERI) && (status & DMA_CHAN_STATUS_ETI);
		} else {
			status = readl(priv->ioaddr + DMA_STATUS);
			finish = (status & DMA_STATUS_ERI) && (status & DMA_STATUS_ETI);
		}

		if (finish) {
			if (!dwmac_rk_get_desc_status(priv, lb_priv)) {
				ret = dwmac_rk_rx_validate(priv, lb_priv);
				break;
			}
		}
	} while (delay > 0);
	writel((status & 0x1ffff), priv->ioaddr + DMA_STATUS);

stop:
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx(priv, priv->ioaddr, chan);
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx(priv, priv->ioaddr, chan);

	stmmac_mac_set(priv, priv->ioaddr, false);
	/* wait for the state machine to be disabled */
	usleep_range(100, 150);

	dwmac_rk_tx_clean(priv, lb_priv);
	dwmac_rk_rx_clean(priv, lb_priv);

	return ret;
}

static int dwmac_rk_loopback_with_identify(struct stmmac_priv *priv,
					   struct dwmac_rk_lb_priv *lb_priv,
					   int tx, int rx)
{
	lb_priv->id++;
	lb_priv->tx = tx;
	lb_priv->rx = rx;

	lb_priv->packet = &dwmac_rk_tcp_attr;
	dwmac_rk_set_rgmii_delayline(priv, tx, rx);

	return __dwmac_rk_loopback_run(priv, lb_priv);
}

static inline bool dwmac_rk_delayline_is_txvalid(struct dwmac_rk_lb_priv *lb_priv,
						 int tx)
{
	if (tx > 0 && tx < lb_priv->max_delay)
		return true;
	else
		return false;
}

static inline bool dwmac_rk_delayline_is_valid(struct dwmac_rk_lb_priv *lb_priv,
					       int tx, int rx)
{
	if ((tx > 0 && tx < lb_priv->max_delay) &&
	    (rx > 0 && rx < lb_priv->max_delay))
		return true;
	else
		return false;
}

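/*
 * Cross search: step through the rx delay range and, for each rx value,
 * find the widest window of tx values that loop back successfully. Once a
 * tx window wider than SCAN_VALID_RANGE is found, fix tx at its midpoint
 * and sweep rx the same way; the pair of midpoints is the calibrated
 * delayline. This probes a cross of rows and columns instead of the full
 * matrix that dwmac_rk_delayline_scan() walks.
 */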
static int dwmac_rk_delayline_scan_cross(struct stmmac_priv *priv,
					 struct dwmac_rk_lb_priv *lb_priv)
{
	int tx_left, tx_right, rx_up, rx_down;
	int i, j, tx_index, rx_index;
	int tx_mid = 0, rx_mid = 0;

	/* initialization */
	tx_index = SCAN_STEP;
	rx_index = SCAN_STEP;

re_scan:
	/* start from rx, based on experience */
	for (i = rx_index; i <= (lb_priv->max_delay - SCAN_STEP); i += SCAN_STEP) {
		tx_left = 0;
		tx_right = 0;
		tx_mid = 0;

		for (j = tx_index; j <= (lb_priv->max_delay - SCAN_STEP);
		     j += SCAN_STEP) {
			if (!dwmac_rk_loopback_with_identify(priv,
			    lb_priv, j, i)) {
				if (!tx_left)
					tx_left = j;
				tx_right = j;
			}
		}

		/* look for tx_mid */
		if ((tx_right - tx_left) > SCAN_VALID_RANGE) {
			tx_mid = (tx_right + tx_left) / 2;
			break;
		}
	}

	/* Worst case: reached the end without finding a valid tx window */
	if (i > (lb_priv->max_delay - SCAN_STEP))
		goto end;

	rx_up = 0;
	rx_down = 0;

	/* look for rx_mid based on the tx_mid */
	for (i = SCAN_STEP; i <= (lb_priv->max_delay - SCAN_STEP);
	     i += SCAN_STEP) {
		if (!dwmac_rk_loopback_with_identify(priv, lb_priv,
		    tx_mid, i)) {
			if (!rx_up)
				rx_up = i;
			rx_down = i;
		}
	}

	if ((rx_down - rx_up) > SCAN_VALID_RANGE) {
		/* Now get the rx_mid */
		rx_mid = (rx_up + rx_down) / 2;
	} else {
		rx_index += SCAN_STEP;
		rx_mid = 0;
		goto re_scan;
	}

	if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
		lb_priv->final_tx = tx_mid;
		lb_priv->final_rx = rx_mid;

		pr_info("Found available tx_delay = 0x%02x, rx_delay = 0x%02x\n",
			lb_priv->final_tx, lb_priv->final_rx);

		return 0;
	}
end:
	pr_err("Can't find an available delayline\n");
	return -ENXIO;
}

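/*
 * Exhaustive scan used from sysfs: walk the full rx * tx delay matrix and
 * print one row per rx value, with 'O' marking the delay pairs whose
 * loopback frame came back intact. The final delayline is the centroid of
 * all passing points. For RGMII_RXID interfaces the rx delay is supplied
 * by the PHY, so only the tx axis is scanned (rx is forced to -1,
 * meaning "disabled").
 */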
static int dwmac_rk_delayline_scan(struct stmmac_priv *priv,
				   struct dwmac_rk_lb_priv *lb_priv)
{
	int phy_iface = dwmac_rk_get_phy_interface(priv);
	int tx, rx, tx_sum, rx_sum, count;
	int tx_mid, rx_mid;
	int ret = -ENXIO;

	tx_sum = 0;
	rx_sum = 0;
	count = 0;

	for (rx = 0x0; rx <= lb_priv->max_delay; rx++) {
		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
			rx = -1;
		printk(KERN_CONT "RX(%03d):", rx);
		for (tx = 0x0; tx <= lb_priv->max_delay; tx++) {
			if (!dwmac_rk_loopback_with_identify(priv,
			    lb_priv, tx, rx)) {
				tx_sum += tx;
				rx_sum += rx;
				count++;
				printk(KERN_CONT "O");
			} else {
				printk(KERN_CONT " ");
			}
		}
		printk(KERN_CONT "\n");

		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
			break;
	}

	if (tx_sum && rx_sum && count) {
		tx_mid = tx_sum / count;
		rx_mid = rx_sum / count;

		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
			if (dwmac_rk_delayline_is_txvalid(lb_priv, tx_mid)) {
				lb_priv->final_tx = tx_mid;
				lb_priv->final_rx = -1;
				ret = 0;
			}
		} else {
			if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
				lb_priv->final_tx = tx_mid;
				lb_priv->final_rx = rx_mid;
				ret = 0;
			}
		}
	}

	if (ret) {
		pr_err("\nCan't find a suitable delayline\n");
	} else {
		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
			pr_info("Found suitable tx_delay = 0x%02x, rx_delay = disabled\n",
				lb_priv->final_tx);
		else
			pr_info("\nFound suitable tx_delay = 0x%02x, rx_delay = 0x%02x\n",
				lb_priv->final_tx, lb_priv->final_rx);
	}

	return ret;
}

static int dwmac_rk_loopback_delayline_scan(struct stmmac_priv *priv,
					    struct dwmac_rk_lb_priv *lb_priv)
{
	if (lb_priv->sysfs)
		return dwmac_rk_delayline_scan(priv, lb_priv);
	else
		return dwmac_rk_delayline_scan_cross(priv, lb_priv);
}

static void dwmac_rk_dma_free_rx_skbufs(struct stmmac_priv *priv,
					struct dwmac_rk_lb_priv *lb_priv)
{
	if (lb_priv->rx_skbuff) {
		dma_unmap_single(priv->device, lb_priv->rx_skbuff_dma,
				 lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(lb_priv->rx_skbuff);
	}
	lb_priv->rx_skbuff = NULL;
}

static void dwmac_rk_dma_free_tx_skbufs(struct stmmac_priv *priv,
					struct dwmac_rk_lb_priv *lb_priv)
{
	if (lb_priv->tx_skbuff_dma) {
		dma_unmap_single(priv->device,
				 lb_priv->tx_skbuff_dma,
				 lb_priv->tx_skbuff_dma_len,
				 DMA_TO_DEVICE);
	}

	if (lb_priv->tx_skbuff) {
		dev_kfree_skb_any(lb_priv->tx_skbuff);
		lb_priv->tx_skbuff = NULL;
		lb_priv->tx_skbuff_dma = 0;
	}
}

static int dwmac_rk_init_dma_desc_rings(struct net_device *dev, gfp_t flags,
					struct dwmac_rk_lb_priv *lb_priv)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct dma_desc *p;

	p = lb_priv->dma_tx;
	p->des2 = 0;
	lb_priv->tx_skbuff_dma = 0;
	lb_priv->tx_skbuff_dma_len = 0;
	lb_priv->tx_skbuff = NULL;

	lb_priv->rx_skbuff = NULL;
	stmmac_init_rx_desc(priv, lb_priv->dma_rx,
			    priv->use_riwt, priv->mode,
			    true, lb_priv->dma_buf_sz);

	stmmac_init_tx_desc(priv, lb_priv->dma_tx,
			    priv->mode, true);

	return 0;
}

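/*
 * Only one descriptor per direction is needed for the single-frame
 * loopback, so each "ring" is a lone coherent dma_desc rather than the
 * descriptor arrays the normal driver path allocates.
 */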
static int dwmac_rk_alloc_dma_desc_resources(struct stmmac_priv *priv,
					     struct dwmac_rk_lb_priv *lb_priv)
{
	int ret = -ENOMEM;

	/* desc dma map */
	lb_priv->dma_rx = dma_alloc_coherent(priv->device,
					     sizeof(struct dma_desc),
					     &lb_priv->dma_rx_phy,
					     GFP_KERNEL);
	if (!lb_priv->dma_rx)
		return ret;

	lb_priv->dma_tx = dma_alloc_coherent(priv->device,
					     sizeof(struct dma_desc),
					     &lb_priv->dma_tx_phy,
					     GFP_KERNEL);
	if (!lb_priv->dma_tx) {
		dma_free_coherent(priv->device,
				  sizeof(struct dma_desc),
				  lb_priv->dma_rx, lb_priv->dma_rx_phy);
		return ret;
	}

	return 0;
}

static void dwmac_rk_free_dma_desc_resources(struct stmmac_priv *priv,
					     struct dwmac_rk_lb_priv *lb_priv)
{
	/* Release the DMA TX/RX socket buffers */
	dwmac_rk_dma_free_rx_skbufs(priv, lb_priv);
	dwmac_rk_dma_free_tx_skbufs(priv, lb_priv);

	dma_free_coherent(priv->device, sizeof(struct dma_desc),
			  lb_priv->dma_tx, lb_priv->dma_tx_phy);
	dma_free_coherent(priv->device, sizeof(struct dma_desc),
			  lb_priv->dma_rx, lb_priv->dma_rx_phy);
}

static int dwmac_rk_init_dma_engine(struct stmmac_priv *priv,
				    struct dwmac_rk_lb_priv *lb_priv)
{
	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u32 chan = 0;
	int ret = 0;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 0);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    lb_priv->dma_rx_phy, chan);

		lb_priv->rx_tail_addr = lb_priv->dma_rx_phy +
			    (1 * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       lb_priv->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    lb_priv->dma_tx_phy, chan);

		lb_priv->tx_tail_addr = lb_priv->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       lb_priv->tx_tail_addr, chan);
	}

	return ret;
}

static void dwmac_rk_dma_operation_mode(struct stmmac_priv *priv,
					struct dwmac_rk_lb_priv *lb_priv)
{
	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = SF_DMA_MODE;
	u32 rxmode = SF_DMA_MODE;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, lb_priv->dma_buf_sz,
				      chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}

static void dwmac_rk_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

static void dwmac_rk_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

static void dwmac_rk_mtl_configuration(struct stmmac_priv *priv)
{
	/* Map RX MTL to DMA channels */
	dwmac_rk_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	dwmac_rk_mac_enable_rx_queues(priv);
}

static void dwmac_rk_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
}

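/*
 * Bring the controller up far enough for loopback without going through
 * the normal open() path: allocate and initialize the single-descriptor
 * rings, reset and configure the DMA engine, program the MAC address and
 * core, set up the MTL queue mapping and MMC counters, then pick the DMA
 * operation mode. OSP (gmac4) / OSF (gmac1000) second-frame processing is
 * disabled, per the inline comments below.
 */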
static int dwmac_rk_init(struct net_device *dev,
			 struct dwmac_rk_lb_priv *lb_priv)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;
	u32 mode;

	lb_priv->dma_buf_sz = 1536; /* mtu 1500 size */

	if (priv->plat->has_gmac4)
		lb_priv->buf_sz = priv->dma_cap.rx_fifo_size; /* rx fifo size */
	else
		lb_priv->buf_sz = 4096; /* rx fifo size */

	ret = dwmac_rk_alloc_dma_desc_resources(priv, lb_priv);
	if (ret < 0) {
		pr_err("%s: DMA descriptors allocation failed\n", __func__);
		return ret;
	}

	ret = dwmac_rk_init_dma_desc_rings(dev, GFP_KERNEL, lb_priv);
	if (ret < 0) {
		pr_err("%s: DMA descriptors initialization failed\n", __func__);
		goto init_error;
	}

	/* DMA initialization and SW reset */
	ret = dwmac_rk_init_dma_engine(priv, lb_priv);
	if (ret < 0) {
		pr_err("%s: DMA engine initialization failed\n", __func__);
		goto init_error;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	dwmac_rk_mtl_configuration(priv);

	dwmac_rk_mmc_setup(priv);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		pr_warn("RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Set the HW DMA mode and the COE */
	dwmac_rk_dma_operation_mode(priv, lb_priv);

	if (priv->plat->has_gmac4) {
		mode = readl(priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
		/* Disable OSP to get best performance */
		mode &= ~DMA_CONTROL_OSP;
		writel(mode, priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
	} else {
		/* Disable OSF */
		mode = readl(priv->ioaddr + DMA_CONTROL);
		writel((mode & ~DMA_CONTROL_OSF), priv->ioaddr + DMA_CONTROL);
	}

	stmmac_enable_dma_irq(priv, priv->ioaddr, 0, 1, 1);

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

	return 0;
init_error:
	dwmac_rk_free_dma_desc_resources(priv, lb_priv);

	return ret;
}

static void dwmac_rk_release(struct net_device *dev,
			     struct dwmac_rk_lb_priv *lb_priv)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_disable_dma_irq(priv, priv->ioaddr, 0, 0, 0);

	/* Release and free the Rx/Tx resources */
	dwmac_rk_free_dma_desc_resources(priv, lb_priv);
}

static int dwmac_rk_get_max_delayline(struct stmmac_priv *priv)
{
	if (of_device_is_compatible(priv->device->of_node,
				    "rockchip,rk3588-gmac"))
		return RK3588_MAX_DELAYLINE;
	else
		return MAX_DELAYLINE;
}

static int dwmac_rk_phy_poll_reset(struct stmmac_priv *priv, int addr)
{
	/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
	unsigned int val, retries = 12;
	int ret;

	val = mdiobus_read(priv->mii, addr, MII_BMCR);
	mdiobus_write(priv->mii, addr, MII_BMCR, val | BMCR_RESET);

	do {
		msleep(50);
		ret = mdiobus_read(priv->mii, addr, MII_BMCR);
		if (ret < 0)
			return ret;
	} while (ret & BMCR_RESET && --retries);
	if (ret & BMCR_RESET)
		return -ETIMEDOUT;

	msleep(1);
	return 0;
}

static int dwmac_rk_loopback_run(struct stmmac_priv *priv,
				 struct dwmac_rk_lb_priv *lb_priv)
{
	struct net_device *ndev = priv->dev;
	int phy_iface = dwmac_rk_get_phy_interface(priv);
	int ndev_up, phy_addr;
	int ret = -EINVAL;

	if (!ndev || !priv->mii || !ndev->phydev)
		return -EINVAL;

	phy_addr = ndev->phydev->mdio.addr;
	lb_priv->max_delay = dwmac_rk_get_max_delayline(priv);

	rtnl_lock();
	/* check whether the netdevice is up */
	ndev_up = ndev->flags & IFF_UP;

	if (ndev_up) {
		if (!netif_running(ndev) || !ndev->phydev) {
			rtnl_unlock();
			return -EINVAL;
		}

		/* check the negotiation status */
		if (ndev->phydev->state != PHY_NOLINK &&
		    ndev->phydev->state != PHY_RUNNING) {
			rtnl_unlock();
			pr_warn("Try again later, after negotiation is done\n");
			return -EAGAIN;
		}

		ndev->netdev_ops->ndo_stop(ndev);

		if (priv->plat->stmmac_rst)
			reset_control_assert(priv->plat->stmmac_rst);
		dwmac_rk_phy_poll_reset(priv, phy_addr);
		if (priv->plat->stmmac_rst)
			reset_control_deassert(priv->plat->stmmac_rst);
	}
	/* wait for the PHY and controller to be ready */
	usleep_range(100000, 200000);

	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
			      true, phy_addr, true);

	ret = dwmac_rk_init(ndev, lb_priv);
	if (ret)
		goto exit_init;

	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
			      true, phy_addr, false);

	if (lb_priv->scan) {
		/* scan is only supported for rgmii modes */
		if (phy_iface != PHY_INTERFACE_MODE_RGMII &&
		    phy_iface != PHY_INTERFACE_MODE_RGMII_ID &&
		    phy_iface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    phy_iface != PHY_INTERFACE_MODE_RGMII_TXID) {
			ret = -EINVAL;
			goto out;
		}
		ret = dwmac_rk_loopback_delayline_scan(priv, lb_priv);
	} else {
		lb_priv->id++;
		lb_priv->tx = 0;
		lb_priv->rx = 0;

		lb_priv->packet = &dwmac_rk_tcp_attr;
		ret = __dwmac_rk_loopback_run(priv, lb_priv);
	}

out:
	dwmac_rk_release(ndev, lb_priv);
	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
			      false, phy_addr, false);
exit_init:
	if (ndev_up)
		ndev->netdev_ops->ndo_open(ndev);

	rtnl_unlock();

	return ret;
}

static ssize_t rgmii_delayline_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int tx, rx;

	dwmac_rk_get_rgmii_delayline(priv, &tx, &rx);

	return sprintf(buf, "tx delayline: 0x%x, rx delayline: 0x%x\n",
		       tx, rx);
}

static ssize_t rgmii_delayline_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int tx = 0, rx = 0;
	char tmp[32];
	size_t buf_size = min(count, (sizeof(tmp) - 1));
	char *data;

	memset(tmp, 0, sizeof(tmp));
	strncpy(tmp, buf, buf_size);

	data = tmp;
	data = strstr(data, " ");
	if (!data)
		goto out;
	*data = 0;
	data++;

	if (kstrtoint(tmp, 0, &tx) || tx > dwmac_rk_get_max_delayline(priv))
		goto out;

	if (kstrtoint(data, 0, &rx) || rx > dwmac_rk_get_max_delayline(priv))
		goto out;

	dwmac_rk_set_rgmii_delayline(priv, tx, rx);
	pr_info("Set rgmii delayline tx: 0x%x, rx: 0x%x\n", tx, rx);

	return count;
out:
	pr_err("invalid delayline input, range is <0x0, 0x%x>\n",
	       dwmac_rk_get_max_delayline(priv));
	pr_err("usage: <tx_delayline> <rx_delayline>\n");

	return count;
}
static DEVICE_ATTR_RW(rgmii_delayline);

static ssize_t mac_lb_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct dwmac_rk_lb_priv *lb_priv;
	int ret, speed;

	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
	if (!lb_priv)
		return -ENOMEM;

	ret = kstrtoint(buf, 0, &speed);
	if (ret) {
		kfree(lb_priv);
		return count;
	}
	pr_info("MAC loopback speed set to %d\n", speed);

	lb_priv->sysfs = 1;
	lb_priv->type = LOOPBACK_TYPE_GMAC;
	lb_priv->speed = speed;
	lb_priv->scan = 0;

	ret = dwmac_rk_loopback_run(priv, lb_priv);
	kfree(lb_priv);

	if (!ret)
		pr_info("MAC loopback: PASS\n");
	else
		pr_info("MAC loopback: FAIL\n");

	return count;
}
static DEVICE_ATTR_WO(mac_lb);

static ssize_t phy_lb_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct dwmac_rk_lb_priv *lb_priv;
	int ret, speed;

	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
	if (!lb_priv)
		return -ENOMEM;

	ret = kstrtoint(buf, 0, &speed);
	if (ret) {
		kfree(lb_priv);
		return count;
	}
	pr_info("PHY loopback speed set to %d\n", speed);

	lb_priv->sysfs = 1;
	lb_priv->type = LOOPBACK_TYPE_PHY;
	lb_priv->speed = speed;
	lb_priv->scan = 0;

	ret = dwmac_rk_loopback_run(priv, lb_priv);
	if (!ret)
		pr_info("PHY loopback: PASS\n");
	else
		pr_info("PHY loopback: FAIL\n");

	kfree(lb_priv);
	return count;
}
static DEVICE_ATTR_WO(phy_lb);

static ssize_t phy_lb_scan_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct dwmac_rk_lb_priv *lb_priv;
	int ret, speed;

	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
	if (!lb_priv)
		return -ENOMEM;

	ret = kstrtoint(buf, 0, &speed);
	if (ret) {
		kfree(lb_priv);
		return count;
	}
	pr_info("Delayline scan speed set to %d\n", speed);

	lb_priv->sysfs = 1;
	lb_priv->type = LOOPBACK_TYPE_PHY;
	lb_priv->speed = speed;
	lb_priv->scan = 1;

	dwmac_rk_loopback_run(priv, lb_priv);

	kfree(lb_priv);
	return count;
}
static DEVICE_ATTR_WO(phy_lb_scan);
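
/*
 * Typical usage from userspace; the exact sysfs path is an assumption here
 * and depends on the platform device name of the GMAC on a given board:
 *
 *   # run a MAC-level loopback test at 1000/100/10 Mbps
 *   echo 1000 > /sys/devices/platform/<gmac>/mac_lb
 *   # run a PHY-level loopback test
 *   echo 1000 > /sys/devices/platform/<gmac>/phy_lb
 *   # scan the RGMII delayline matrix at the given speed
 *   echo 1000 > /sys/devices/platform/<gmac>/phy_lb_scan
 *   # read back or set the current tx/rx delayline
 *   cat /sys/devices/platform/<gmac>/rgmii_delayline
 *   echo "0x28 0x10" > /sys/devices/platform/<gmac>/rgmii_delayline
 */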

int dwmac_rk_create_loopback_sysfs(struct device *device)
{
	int ret;

	ret = device_create_file(device, &dev_attr_rgmii_delayline);
	if (ret)
		return ret;

	ret = device_create_file(device, &dev_attr_mac_lb);
	if (ret)
		goto remove_rgmii_delayline;

	ret = device_create_file(device, &dev_attr_phy_lb);
	if (ret)
		goto remove_mac_lb;

	ret = device_create_file(device, &dev_attr_phy_lb_scan);
	if (ret)
		goto remove_phy_lb;

	return 0;

remove_phy_lb:
	device_remove_file(device, &dev_attr_phy_lb);
remove_mac_lb:
	device_remove_file(device, &dev_attr_mac_lb);
remove_rgmii_delayline:
	device_remove_file(device, &dev_attr_rgmii_delayline);

	return ret;
}

int dwmac_rk_remove_loopback_sysfs(struct device *device)
{
	device_remove_file(device, &dev_attr_rgmii_delayline);
	device_remove_file(device, &dev_attr_mac_lb);
	device_remove_file(device, &dev_attr_phy_lb);
	device_remove_file(device, &dev_attr_phy_lb_scan);

	return 0;
}