1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
4  */
5 
6 #include <linux/ip.h>
7 #include <linux/tcp.h>
8 #include <linux/skbuff.h>
9 #include <linux/if_ether.h>
10 #include <linux/if.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/of_device.h>
13 #include <linux/slab.h>
14 #include <linux/prefetch.h>
15 #include <linux/regmap.h>
16 #include <linux/phy.h>
17 #include <linux/udp.h>
19 #include <net/pkt_cls.h>
20 #include <net/tcp.h>
21 #include <net/udp.h>
22 #include <linux/soc/rockchip/rk_vendor_storage.h>
23 #include "stmmac.h"
24 #include "dwmac1000.h"
25 #include "dwmac_dma.h"
26 #include "dwmac-rk-tool.h"
27 
28 enum { LOOPBACK_TYPE_GMAC = 1, LOOPBACK_TYPE_PHY };
29 
30 enum { LOOPBACK_SPEED10 = 10, LOOPBACK_SPEED100 = 100, LOOPBACK_SPEED1000 = 1000 };
31 
32 struct dwmac_rk_packet_attrs {
33     unsigned char src[6];
34     unsigned char dst[6];
35     u32 ip_src;
36     u32 ip_dst;
37     int tcp;
38     int sport;
39     int dport;
40     int size;
41 };
42 
43 struct dwmac_rk_hdr {
44     __be32 version;
45     __be64 magic;
46     u32 id;
47     int tx;
48     int rx;
49 } __packed;
50 
51 struct dwmac_rk_lb_priv {
52     /* descriptors and buffers */
53     struct dma_desc *dma_tx;
54     dma_addr_t dma_tx_phy;
55     struct sk_buff *tx_skbuff;
56     dma_addr_t tx_skbuff_dma;
57     unsigned int tx_skbuff_dma_len;
58 
59     struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
60     dma_addr_t dma_rx_phy;
61     struct sk_buff *rx_skbuff;
62     dma_addr_t rx_skbuff_dma;
63     u32 rx_tail_addr;
64     u32 tx_tail_addr;
65 
66     /* rx buffer size */
67     unsigned int dma_buf_sz;
68     unsigned int buf_sz;
69 
70     int type;
71     int speed;
72     struct dwmac_rk_packet_attrs *packet;
73 
74     unsigned int actual_size;
75     int scan;
76     int sysfs;
77     u32 id;
78     int tx;
79     int rx;
80     int final_tx;
81     int final_rx;
82     int max_delay;
83 };
84 
85 #define DMA_CONTROL_OSP BIT(4)
86 #define DMA_CHAN_BASE_ADDR 0x00001100
87 #define DMA_CHAN_BASE_OFFSET 0x80
88 #define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + ((x)*DMA_CHAN_BASE_OFFSET))
89 #define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
90 #define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
91 #define DMA_CHAN_STATUS_ERI BIT(11)
92 #define DMA_CHAN_STATUS_ETI BIT(10)
93 
94 #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
95 #define MAX_DELAYLINE 0x7f
96 #define RK3588_MAX_DELAYLINE 0xc7
97 #define SCAN_STEP 0x5
98 #define SCAN_VALID_RANGE 0xA
99 
100 #define DWMAC_RK_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct dwmac_rk_hdr))
101 #define DWMAC_RK_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
102 #define DWMAC_RK_TEST_PKT_MAX_SIZE 1500
103 
104 static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_udp_attr = {
105     .dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
106     .tcp = 0,
107     .size = 1024,
108 };
109 
110 static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_tcp_attr = {
111     .dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
112     .tcp = 1,
113     .size = 1024,
114 };
115 
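/*
 * Force the GMAC into internal loopback at the requested speed. When @phy is
 * true, also program the PHY's BMCR over MDIO so its speed/duplex match and
 * autoneg/power-down are cleared.
 */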
116 static int dwmac_rk_enable_mac_loopback(struct stmmac_priv *priv, int speed, int addr, bool phy)
117 {
118     u32 ctrl;
119     int phy_val;
120 
121     ctrl = readl(priv->ioaddr + GMAC_CONTROL);
122     ctrl &= ~priv->hw->link.speed_mask;
123     ctrl |= GMAC_CONTROL_LM;
124 
125     if (phy) {
126         phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
127     }
128 
129     switch (speed) {
130         case LOOPBACK_SPEED1000:
131             ctrl |= priv->hw->link.speed1000;
132             if (phy) {
133                 phy_val &= ~BMCR_SPEED100;
134                 phy_val |= BMCR_SPEED1000;
135             }
136             break;
137         case LOOPBACK_SPEED100:
138             ctrl |= priv->hw->link.speed100;
139             if (phy) {
140                 phy_val &= ~BMCR_SPEED1000;
141                 phy_val |= BMCR_SPEED100;
142             }
143             break;
144         case LOOPBACK_SPEED10:
145             ctrl |= priv->hw->link.speed10;
146             if (phy) {
147                 phy_val &= ~BMCR_SPEED1000;
148                 phy_val &= ~BMCR_SPEED100;
149             }
150             break;
151         default:
152             return -EPERM;
153     }
154 
155     ctrl |= priv->hw->link.duplex;
156     writel(ctrl, priv->ioaddr + GMAC_CONTROL);
157 
158     if (phy) {
159         phy_val &= ~BMCR_PDOWN;
160         phy_val &= ~BMCR_ANENABLE;
162         phy_val |= BMCR_FULLDPLX;
163         mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
164         phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
165     }
166 
167     if (likely(priv->plat->fix_mac_speed)) {
168         priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
169     }
170 
171     return 0;
172 }
173 
174 static int dwmac_rk_disable_mac_loopback(struct stmmac_priv *priv, int addr)
175 {
176     u32 ctrl;
177     int phy_val;
178 
179     ctrl = readl(priv->ioaddr + GMAC_CONTROL);
180     ctrl &= ~GMAC_CONTROL_LM;
181     writel(ctrl, priv->ioaddr + GMAC_CONTROL);
182 
183     phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
184     phy_val |= BMCR_ANENABLE;
185 
186     mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
187     phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
188 
189     return 0;
190 }
191 
192 static int dwmac_rk_set_mac_loopback(struct stmmac_priv *priv, int speed, bool enable, int addr, bool phy)
193 {
194     if (enable) {
195         return dwmac_rk_enable_mac_loopback(priv, speed, addr, phy);
196     } else {
197         return dwmac_rk_disable_mac_loopback(priv, addr);
198     }
199 }
200 
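/*
 * PHY loopback: program the MAC speed/duplex and, when @phy is true, put the
 * PHY itself into BMCR_LOOPBACK with autoneg and power-down disabled.
 */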
201 static int dwmac_rk_enable_phy_loopback(struct stmmac_priv *priv, int speed, int addr, bool phy)
202 {
203     u32 ctrl;
204     int val;
205 
206     ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
207     ctrl &= ~priv->hw->link.speed_mask;
208 
209     if (phy) {
210         val = mdiobus_read(priv->mii, addr, MII_BMCR);
211     }
212 
213     switch (speed) {
214         case LOOPBACK_SPEED1000:
215             ctrl |= priv->hw->link.speed1000;
216             if (phy) {
217                 val &= ~BMCR_SPEED100;
218                 val |= BMCR_SPEED1000;
219             }
220             break;
221         case LOOPBACK_SPEED100:
222             ctrl |= priv->hw->link.speed100;
223             if (phy) {
224                 val &= ~BMCR_SPEED1000;
225                 val |= BMCR_SPEED100;
226             }
227             break;
228         case LOOPBACK_SPEED10:
229             ctrl |= priv->hw->link.speed10;
230             if (phy) {
231                 val &= ~BMCR_SPEED1000;
232                 val &= ~BMCR_SPEED100;
233             }
234             break;
235         default:
236             return -EPERM;
237     }
238 
239     ctrl |= priv->hw->link.duplex;
240     writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
241 
242     if (phy) {
243         val |= BMCR_FULLDPLX;
244         val &= ~BMCR_PDOWN;
245         val &= ~BMCR_ANENABLE;
246         val |= BMCR_LOOPBACK;
247         mdiobus_write(priv->mii, addr, MII_BMCR, val);
248         val = mdiobus_read(priv->mii, addr, MII_BMCR);
249     }
250 
251     if (likely(priv->plat->fix_mac_speed)) {
252         priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
253     }
254 
255     return 0;
256 }
257 
258 static int dwmac_rk_disable_phy_loopback(struct stmmac_priv *priv, int addr)
259 {
260     int val;
261 
262     val = mdiobus_read(priv->mii, addr, MII_BMCR);
263     val |= BMCR_ANENABLE;
264     val &= ~BMCR_LOOPBACK;
265 
266     mdiobus_write(priv->mii, addr, MII_BMCR, val);
267     val = mdiobus_read(priv->mii, addr, MII_BMCR);
268 
269     return 0;
270 }
271 
272 static int dwmac_rk_set_phy_loopback(struct stmmac_priv *priv, int speed, bool enable, int addr, bool phy)
273 {
274     if (enable) {
275         return dwmac_rk_enable_phy_loopback(priv, speed, addr, phy);
276     } else {
277         return dwmac_rk_disable_phy_loopback(priv, addr);
278     }
279 }
280 
281 static int dwmac_rk_set_loopback(struct stmmac_priv *priv, int type, int speed, bool enable, int addr, bool phy)
282 {
283     int ret;
284 
285     switch (type) {
286         case LOOPBACK_TYPE_PHY:
287             ret = dwmac_rk_set_phy_loopback(priv, speed, enable, addr, phy);
288             break;
289         case LOOPBACK_TYPE_GMAC:
290             ret = dwmac_rk_set_mac_loopback(priv, speed, enable, addr, phy);
291             break;
292         default:
293             ret = -EOPNOTSUPP;
294     }
295 
296     usleep_range(100000, 200000);
297     return ret;
298 }
299 
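/* Copy a MAC address as three u16 words; both buffers must be 2-byte aligned. */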
300 static inline void dwmac_rk_ether_addr_copy(u8 *dst, const u8 *src)
301 {
302     u16 *a = (u16 *)dst;
303     const u16 *b = (const u16 *)src;
304 
305     a[0] = b[0];
306     a[1] = b[1];
307     a[2] = b[2];
308 }
309 
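/*
 * Pre-compute the UDP pseudo-header checksum and point csum_start/csum_offset
 * at the UDP check field so the MAC can insert the final checksum.
 */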
310 static void dwmac_rk_udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
311 {
312     struct udphdr *uh = udp_hdr(skb);
313     int offset = skb_transport_offset(skb);
314     int len = skb->len - offset;
315 
316     skb->csum_start = skb_transport_header(skb) - skb->head;
317     skb->csum_offset = offsetof(struct udphdr, check);
318     uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
319 }
320 
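/*
 * Build one self-describing loopback frame: Ethernet + IPv4 + TCP/UDP header,
 * a dwmac_rk_hdr carrying the magic/id and the tx/rx delayline under test,
 * followed by a random payload of attr->size bytes.
 */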
321 static struct sk_buff *dwmac_rk_get_skb(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
322 {
323     struct sk_buff *skb = NULL;
324     struct udphdr *uhdr = NULL;
325     struct tcphdr *thdr = NULL;
326     struct dwmac_rk_hdr *shdr;
327     struct ethhdr *ehdr;
328     struct iphdr *ihdr;
329     struct dwmac_rk_packet_attrs *attr;
330     int iplen, size, nfrags;
331 
332     attr = lb_priv->packet;
333     size = attr->size + DWMAC_RK_TEST_PKT_SIZE;
334     if (attr->tcp) {
335         size += sizeof(struct tcphdr);
336     } else {
337         size += sizeof(struct udphdr);
338     }
339 
340     if (size >= DWMAC_RK_TEST_PKT_MAX_SIZE) {
341         return NULL;
342     }
343 
344     lb_priv->actual_size = size;
345 
346     skb = netdev_alloc_skb_ip_align(priv->dev, size);
347     if (!skb) {
348         return NULL;
349     }
350 
351     skb_linearize(skb);
352     nfrags = skb_shinfo(skb)->nr_frags;
353     if (nfrags > 0) {
354         pr_err("%s: TX nfrags is not zero\n", __func__);
355         dev_kfree_skb(skb);
356         return NULL;
357     }
358 
359     ehdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
360     skb_reset_mac_header(skb);
361 
362     skb_set_network_header(skb, skb->len);
363     ihdr = (struct iphdr *)skb_put(skb, sizeof(*ihdr));
364 
365     skb_set_transport_header(skb, skb->len);
366     if (attr->tcp) {
367         thdr = (struct tcphdr *)skb_put(skb, sizeof(*thdr));
368     } else {
369         uhdr = (struct udphdr *)skb_put(skb, sizeof(*uhdr));
370     }
371 
372     eth_zero_addr(ehdr->h_source);
373     eth_zero_addr(ehdr->h_dest);
374 
375     dwmac_rk_ether_addr_copy(ehdr->h_source, priv->dev->dev_addr);
376     dwmac_rk_ether_addr_copy(ehdr->h_dest, attr->dst);
377 
378     ehdr->h_proto = htons(ETH_P_IP);
379 
380     if (attr->tcp) {
381         if (!thdr) {
382             dev_kfree_skb(skb);
383             return NULL;
384         }
385 
386         thdr->source = htons(attr->sport);
387         thdr->dest = htons(attr->dport);
388         thdr->doff = sizeof(struct tcphdr) / 4;
389         thdr->check = 0;
390     } else {
391         if (!uhdr) {
392             dev_kfree_skb(skb);
393             return NULL;
394         }
395 
396         uhdr->source = htons(attr->sport);
397         uhdr->dest = htons(attr->dport);
398         uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
399         uhdr->check = 0;
400     }
401 
402     ihdr->ihl = 5;
403     ihdr->ttl = 32;
404     ihdr->version = 4;
405     if (attr->tcp) {
406         ihdr->protocol = IPPROTO_TCP;
407     } else {
408         ihdr->protocol = IPPROTO_UDP;
409     }
410 
411     iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
412     if (attr->tcp) {
413         iplen += sizeof(*thdr);
414     } else {
415         iplen += sizeof(*uhdr);
416     }
417 
418     ihdr->tot_len = htons(iplen);
419     ihdr->frag_off = 0;
420     ihdr->saddr = htonl(attr->ip_src);
421     ihdr->daddr = htonl(attr->ip_dst);
422     ihdr->tos = 0;
423     ihdr->id = 0;
424     ip_send_check(ihdr);
425 
426     shdr = (struct dwmac_rk_hdr *)skb_put(skb, sizeof(*shdr));
427     shdr->version = 0;
428     shdr->magic = cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC);
429     shdr->id = lb_priv->id;
430     shdr->tx = lb_priv->tx;
431     shdr->rx = lb_priv->rx;
432 
433     if (attr->size) {
434         skb_put(skb, attr->size);
435         get_random_bytes((u8 *)shdr + sizeof(*shdr), attr->size);
436     }
437 
438     skb->csum = 0;
439     skb->ip_summed = CHECKSUM_PARTIAL;
440     if (attr->tcp) {
441         if (!thdr) {
442             dev_kfree_skb(skb);
443             return NULL;
444         }
445 
446         thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
447         skb->csum_start = skb_transport_header(skb) - skb->head;
448         skb->csum_offset = offsetof(struct tcphdr, check);
449     } else {
450         dwmac_rk_udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
451     }
452 
453     skb->protocol = htons(ETH_P_IP);
454     skb->pkt_type = PACKET_HOST;
455 
456     return skb;
457 }
458 
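/*
 * Check that a received frame is the one we just sent: length, MAC addresses,
 * L4 protocol and port, then the magic, id and tx/rx tags in dwmac_rk_hdr.
 */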
459 static int dwmac_rk_loopback_validate(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv, struct sk_buff *skb)
460 {
461     struct dwmac_rk_hdr *shdr;
462     struct ethhdr *ehdr;
463     struct udphdr *uhdr;
464     struct tcphdr *thdr;
465     struct iphdr *ihdr;
466     int ret = -EAGAIN;
467 
468     if (skb->len >= DWMAC_RK_TEST_PKT_MAX_SIZE) {
469         goto out;
470     }
471 
472     if (lb_priv->actual_size != skb->len) {
473         goto out;
474     }
475 
476     ehdr = (struct ethhdr *)(skb->data);
477     if (!ether_addr_equal(ehdr->h_dest, lb_priv->packet->dst)) {
478         goto out;
479     }
480 
481     if (!ether_addr_equal(ehdr->h_source, priv->dev->dev_addr)) {
482         goto out;
483     }
484 
485     ihdr = (struct iphdr *)(skb->data + ETH_HLEN);
486 
487     if (lb_priv->packet->tcp) {
488         if (ihdr->protocol != IPPROTO_TCP) {
489             goto out;
490         }
491 
492         thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
493         if (thdr->dest != htons(lb_priv->packet->dport)) {
494             goto out;
495         }
496 
497         shdr = (struct dwmac_rk_hdr *)((u8 *)thdr + sizeof(*thdr));
498     } else {
499         if (ihdr->protocol != IPPROTO_UDP) {
500             goto out;
501         }
502 
503         uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
504         if (uhdr->dest != htons(lb_priv->packet->dport)) {
505             goto out;
506         }
507 
508         shdr = (struct dwmac_rk_hdr *)((u8 *)uhdr + sizeof(*uhdr));
509     }
510 
511     if (shdr->magic != cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC)) {
512         goto out;
513     }
514 
515     if (lb_priv->id != shdr->id) {
516         goto out;
517     }
518 
519     if (lb_priv->tx != shdr->tx || lb_priv->rx != shdr->rx) {
520         goto out;
521     }
522 
523     ret = 0;
524 out:
525     return ret;
526 }
527 
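/*
 * Allocate and DMA-map a single receive buffer (if not already present),
 * write its address into the RX descriptor, hand ownership to the DMA and
 * update the RX tail pointer.
 */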
528 static inline int dwmac_rk_rx_fill(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
529 {
530     struct dma_desc *p;
531     struct sk_buff *skb;
532 
533     p = lb_priv->dma_rx;
534     if (likely(!lb_priv->rx_skbuff)) {
535         skb = netdev_alloc_skb_ip_align(priv->dev, lb_priv->buf_sz);
536         if (unlikely(!skb)) {
537             return -ENOMEM;
538         }
539 
540         if (skb_linearize(skb)) {
541             pr_err("%s: Rx skb linearize failed\n", __func__);
542             lb_priv->rx_skbuff = NULL;
543             dev_kfree_skb(skb);
544             return -EPERM;
545         }
546 
547         lb_priv->rx_skbuff = skb;
548         lb_priv->rx_skbuff_dma = dma_map_single(priv->device, skb->data, lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
549         if (dma_mapping_error(priv->device, lb_priv->rx_skbuff_dma)) {
550             pr_err("%s: Rx dma map failed\n", __func__);
551             lb_priv->rx_skbuff = NULL;
552             dev_kfree_skb(skb);
553             return -EFAULT;
554         }
555 
556         stmmac_set_desc_addr(priv, p, lb_priv->rx_skbuff_dma);
557         /* Fill DES3 in case of RING mode */
558         if (lb_priv->dma_buf_sz == BUF_SIZE_16KiB) {
559             p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
560         }
561     }
562 
563     wmb();
564     stmmac_set_rx_owner(priv, p, priv->use_riwt);
565     wmb();
566 
567     stmmac_set_rx_tail_ptr(priv, priv->ioaddr, lb_priv->rx_tail_addr, 0);
568 
569     return 0;
570 }
571 
572 static void dwmac_rk_rx_clean(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
573 {
574     struct sk_buff *skb;
575 
576     skb = lb_priv->rx_skbuff;
577 
578     if (likely(lb_priv->rx_skbuff)) {
579         dma_unmap_single(priv->device, lb_priv->rx_skbuff_dma, lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
580         dev_kfree_skb(skb);
581         lb_priv->rx_skbuff = NULL;
582     }
583 }
584 
585 static int dwmac_rk_rx_validate(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
586 {
587     struct dma_desc *p;
588     struct sk_buff *skb;
589     int coe = priv->hw->rx_csum;
590     unsigned int frame_len;
591     int ret;
592 
593     p = lb_priv->dma_rx;
594     skb = lb_priv->rx_skbuff;
595     if (unlikely(!skb)) {
596         pr_err("%s: Inconsistent Rx descriptor chain\n", __func__);
597         return -EINVAL;
598     }
599 
600     frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
601     /* check if frame_len fits the preallocated memory */
602     if (frame_len > lb_priv->dma_buf_sz) {
603         pr_err("%s: frame_len too long: %d\n", __func__, frame_len);
604         return -ENOMEM;
605     }
606 
607     frame_len -= ETH_FCS_LEN;
608     prefetch(skb->data - NET_IP_ALIGN);
609     skb_put(skb, frame_len);
610     dma_unmap_single(priv->device, lb_priv->rx_skbuff_dma, lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
611 
612     ret = dwmac_rk_loopback_validate(priv, lb_priv, skb);
613     dwmac_rk_rx_clean(priv, lb_priv);
614     dwmac_rk_rx_fill(priv, lb_priv);
615 
616     return ret;
617 }
618 
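/*
 * Return 0 once both the TX and RX descriptors have been released by the DMA,
 * or -EBUSY while either one is still owned by hardware.
 */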
619 static int dwmac_rk_get_desc_status(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
620 {
621     struct dma_desc *txp, *rxp;
622     int tx_status, rx_status;
623 
624     txp = lb_priv->dma_tx;
625     tx_status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, txp, priv->ioaddr);
626     /* Check if the descriptor is owned by the DMA */
627     if (unlikely(tx_status & tx_dma_own)) {
628         return -EBUSY;
629     }
630 
631     rxp = lb_priv->dma_rx;
632     /* read the status of the incoming frame */
633     rx_status = priv->hw->desc->rx_status(&priv->dev->stats, &priv->xstats, rxp);
634     if (unlikely(rx_status & dma_own)) {
635         return -EBUSY;
636     }
637 
638     usleep_range(100, 150);
639 
640     return 0;
641 }
642 
643 static void dwmac_rk_tx_clean(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
644 {
645     struct sk_buff *skb;
646     struct dma_desc *p;
647 
648     skb = lb_priv->tx_skbuff;
649     p = lb_priv->dma_tx;
650 
651     if (likely(lb_priv->tx_skbuff_dma)) {
652         dma_unmap_single(priv->device, lb_priv->tx_skbuff_dma, lb_priv->tx_skbuff_dma_len, DMA_TO_DEVICE);
653         lb_priv->tx_skbuff_dma = 0;
654     }
655 
656     if (likely(skb)) {
657         dev_kfree_skb(skb);
658         lb_priv->tx_skbuff = NULL;
659     }
660 
661     priv->hw->desc->release_tx_desc(p, priv->mode);
662 }
663 
664 static int dwmac_rk_xmit(struct sk_buff *skb, struct net_device *dev, struct dwmac_rk_lb_priv *lb_priv)
665 {
666     struct stmmac_priv *priv = netdev_priv(dev);
667     unsigned int nopaged_len = skb_headlen(skb);
668     int csum_insertion = 0;
669     struct dma_desc *desc;
670     unsigned int des;
671 
672     priv->hw->mac->reset_eee_mode(priv->hw);
673 
674     csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
675 
676     desc = lb_priv->dma_tx;
677     lb_priv->tx_skbuff = skb;
678 
679     des = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE);
680     if (dma_mapping_error(priv->device, des)) {
681         goto dma_map_err;
682     }
683 
684     stmmac_set_desc_addr(priv, desc, des);
685     lb_priv->tx_skbuff_dma_len = nopaged_len;
686 
687     /* Prepare the first descriptor setting the OWN bit too */
688     stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum_insertion, priv->mode, 1, 1, skb->len);
689     stmmac_enable_dma_transmission(priv, priv->ioaddr);
690 
691     lb_priv->tx_tail_addr = lb_priv->dma_tx_phy + sizeof(*desc);
692     stmmac_set_tx_tail_ptr(priv, priv->ioaddr, lb_priv->tx_tail_addr, 0);
693 
694     return 0;
695 
696 dma_map_err:
697     pr_err("%s: Tx dma map failed\n", __func__);
698     dev_kfree_skb(skb);
699     return -EFAULT;
700 }
701 
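/*
 * Run a single loopback iteration: fill the RX ring, start the MAC and DMA,
 * transmit one test frame, then poll the DMA status for the early TX/RX
 * interrupt bits and validate the received frame before stopping everything.
 */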
702 static int dwmac_rk_loopback_run_first(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
703 {
704     u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
705     u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
706     struct sk_buff *tx_skb;
707     u32 chan = 0;
708     int ret = -EIO, delay;
709     u32 status;
710     bool finish = false;
711 
712     if (lb_priv->speed == LOOPBACK_SPEED1000) {
713         delay = 10;
714     } else if (lb_priv->speed == LOOPBACK_SPEED100) {
715         delay = 20;
716     } else if (lb_priv->speed == LOOPBACK_SPEED10) {
717         delay = 50;
718     } else {
719         return -EPERM;
720     }
721 
722     if (dwmac_rk_rx_fill(priv, lb_priv)) {
723         return -ENOMEM;
724     }
725 
726     /* Enable the MAC Rx/Tx */
727     stmmac_mac_set(priv, priv->ioaddr, true);
728 
729     for (chan = 0; chan < rx_channels_count; chan++) {
730         stmmac_start_rx(priv, priv->ioaddr, chan);
731     }
732     for (chan = 0; chan < tx_channels_count; chan++) {
733         stmmac_start_tx(priv, priv->ioaddr, chan);
734     }
735 
736     tx_skb = dwmac_rk_get_skb(priv, lb_priv);
737     if (!tx_skb) {
738         ret = -ENOMEM;
739         goto stop;
740     }
741 
742     if (dwmac_rk_xmit(tx_skb, priv->dev, lb_priv)) {
743         ret = -EFAULT;
744         goto stop;
745     }
746 
747     do {
748         usleep_range(100, 150);
749         delay--;
750         if (priv->plat->has_gmac4) {
751             status = readl(priv->ioaddr + DMA_CHAN_STATUS(0));
752             finish = (status & DMA_CHAN_STATUS_ERI) && (status & DMA_CHAN_STATUS_ETI);
753         } else {
754             status = readl(priv->ioaddr + DMA_STATUS);
755             finish = (status & DMA_STATUS_ERI) && (status & DMA_STATUS_ETI);
756         }
757 
758         if (finish) {
759             if (!dwmac_rk_get_desc_status(priv, lb_priv)) {
760                 ret = dwmac_rk_rx_validate(priv, lb_priv);
761                 break;
762             }
763         }
764     } while (delay > 0);
765     writel((status & 0x1ffff), priv->ioaddr + DMA_STATUS);
766 
767 stop:
768     for (chan = 0; chan < rx_channels_count; chan++) {
769         stmmac_stop_rx(priv, priv->ioaddr, chan);
770     }
771     for (chan = 0; chan < tx_channels_count; chan++) {
772         stmmac_stop_tx(priv, priv->ioaddr, chan);
773     }
774 
775     stmmac_mac_set(priv, priv->ioaddr, false);
776     /* wait for the state machine to be disabled */
777     usleep_range(100, 150);
778 
779     dwmac_rk_tx_clean(priv, lb_priv);
780     dwmac_rk_rx_clean(priv, lb_priv);
781 
782     return ret;
783 }
784 
785 static int dwmac_rk_loopback_with_identify(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv, int tx, int rx)
786 {
787     lb_priv->id++;
788     lb_priv->tx = tx;
789     lb_priv->rx = rx;
790 
791     lb_priv->packet = &dwmac_rk_tcp_attr;
792     dwmac_rk_set_rgmii_delayline(priv, tx, rx);
793 
794     return dwmac_rk_loopback_run_first(priv, lb_priv);
795 }
796 
797 static inline bool dwmac_rk_delayline_is_txvalid(struct dwmac_rk_lb_priv *lb_priv, int tx)
798 {
799     if (tx > 0 && tx < lb_priv->max_delay) {
800         return true;
801     } else {
802         return false;
803     }
804 }
805 
806 static inline bool dwmac_rk_delayline_is_valid(struct dwmac_rk_lb_priv *lb_priv, int tx, int rx)
807 {
808     if ((tx > 0 && tx < lb_priv->max_delay) && (rx > 0 && rx < lb_priv->max_delay)) {
809         return true;
810     } else {
811         return false;
812     }
813 }
814 
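/*
 * Coarse "cross" search of the RGMII delayline space: step through rx values
 * looking for a tx window wider than SCAN_VALID_RANGE, take its midpoint,
 * then sweep rx at that tx to find the rx midpoint.
 */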
815 static int dwmac_rk_delayline_scan_cross(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
816 {
817     int tx_left, tx_right, rx_up, rx_down;
818     int i, j, tx_index, rx_index;
819     int tx_mid = 0, rx_mid = 0;
820 
821     /* initialization */
822     tx_index = SCAN_STEP;
823     rx_index = SCAN_STEP;
824 
825     while (1) {
826         /* start from rx, based on experience */
827         for (i = rx_index; i <= (lb_priv->max_delay - SCAN_STEP); i += SCAN_STEP) {
828             tx_left = 0;
829             tx_right = 0;
830             tx_mid = 0;
831 
832             for (j = tx_index; j <= (lb_priv->max_delay - SCAN_STEP); j += SCAN_STEP) {
833                 if (!dwmac_rk_loopback_with_identify(priv, lb_priv, j, i)) {
834                     if (!tx_left) {
835                         tx_left = j;
836                     }
837                     tx_right = j;
838                 }
839             }
840 
841             /* look for tx_mid */
842             if ((tx_right - tx_left) > SCAN_VALID_RANGE) {
843                 tx_mid = (tx_right + tx_left) / 2;
844                 break;
845             }
846         }
847 
848         /* Worst case: reach the end */
849         if (i >= (lb_priv->max_delay - SCAN_STEP)) {
850             goto end;
851         }
852 
853         rx_up = 0;
854         rx_down = 0;
855 
856         /* look for rx_mid based on the tx_mid */
857         for (i = SCAN_STEP; i <= (lb_priv->max_delay - SCAN_STEP); i += SCAN_STEP) {
858             if (!dwmac_rk_loopback_with_identify(priv, lb_priv, tx_mid, i)) {
859                 if (!rx_up) {
860                     rx_up = i;
861                 }
862                 rx_down = i;
863             }
864         }
865 
866         if ((rx_down - rx_up) > SCAN_VALID_RANGE) {
867             /* Now get the rx_mid */
868             rx_mid = (rx_up + rx_down) / 2;
869         } else {
870             rx_index += SCAN_STEP;
871             rx_mid = 0;
872             continue;
873         }
874 
875         if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
876             lb_priv->final_tx = tx_mid;
877             lb_priv->final_rx = rx_mid;
878 
879             pr_info("Find available tx_delay = 0x%02x, rx_delay = 0x%02x\n", lb_priv->final_tx, lb_priv->final_rx);
880 
881             return 0;
882         }
883         break;
884     }
885 end:
886     pr_err("Can't find available delayline\n");
887     return -ENXIO;
888 }
889 
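/*
 * Exhaustive delayline scan used from sysfs: print an O/space map of working
 * tx/rx combinations and report the averaged centre of the passing region.
 * For RGMII_RXID the rx delayline is left disabled (-1) and only tx is swept.
 */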
890 static int dwmac_rk_delayline_scan(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
891 {
892     int phy_iface = dwmac_rk_get_phy_interface(priv);
893     int tx, rx, tx_sum, rx_sum, count;
894     int tx_mid, rx_mid;
895     int ret = -ENXIO;
896 
897     tx_sum = 0;
898     rx_sum = 0;
899     count = 0;
900 
901     for (rx = 0x0; rx <= lb_priv->max_delay; rx++) {
902         if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
903             rx = -1;
904         }
905         printk(KERN_CONT "RX(%03d):", rx);
906         for (tx = 0x0; tx <= lb_priv->max_delay; tx++) {
907             if (!dwmac_rk_loopback_with_identify(priv, lb_priv, tx, rx)) {
908                 tx_sum += tx;
909                 rx_sum += rx;
910                 count++;
911                 printk(KERN_CONT "O");
912             } else {
913                 printk(KERN_CONT " ");
914             }
915         }
916         printk(KERN_CONT "\n");
917 
918         if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
919             break;
920         }
921     }
922 
923     if (tx_sum && rx_sum && count) {
924         tx_mid = tx_sum / count;
925         rx_mid = rx_sum / count;
926 
927         if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
928             if (dwmac_rk_delayline_is_txvalid(lb_priv, tx_mid)) {
929                 lb_priv->final_tx = tx_mid;
930                 lb_priv->final_rx = -1;
931                 ret = 0;
932             }
933         } else {
934             if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
935                 lb_priv->final_tx = tx_mid;
936                 lb_priv->final_rx = rx_mid;
937                 ret = 0;
938             }
939         }
940     }
941 
942     if (ret) {
943         pr_err("\nCan't find suitable delayline\n");
944     } else {
945         if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
946             pr_info("Find available tx_delay = 0x%02x, rx_delay = disable\n", lb_priv->final_tx);
947         } else {
948             pr_info("\nFind suitable tx_delay = 0x%02x, rx_delay = 0x%02x\n", lb_priv->final_tx, lb_priv->final_rx);
949         }
950     }
951 
952     return ret;
953 }
954 
955 static int dwmac_rk_loopback_delayline_scan(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
956 {
957     if (lb_priv->sysfs) {
958         return dwmac_rk_delayline_scan(priv, lb_priv);
959     } else {
960         return dwmac_rk_delayline_scan_cross(priv, lb_priv);
961     }
962 }
963 
964 static void dwmac_rk_dma_free_rx_skbufs(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
965 {
966     if (lb_priv->rx_skbuff) {
967         dma_unmap_single(priv->device, lb_priv->rx_skbuff_dma, lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
968         dev_kfree_skb_any(lb_priv->rx_skbuff);
969     }
970     lb_priv->rx_skbuff = NULL;
971 }
972 
973 static void dwmac_rk_dma_free_tx_skbufs(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
974 {
975     if (lb_priv->tx_skbuff_dma) {
976         dma_unmap_single(priv->device, lb_priv->tx_skbuff_dma, lb_priv->tx_skbuff_dma_len, DMA_TO_DEVICE);
977     }
978 
979     if (lb_priv->tx_skbuff) {
980         dev_kfree_skb_any(lb_priv->tx_skbuff);
981         lb_priv->tx_skbuff = NULL;
982         lb_priv->tx_skbuff_dma = 0;
983     }
984 }
985 
986 static int dwmac_rk_init_dma_desc_rings(struct net_device *dev, gfp_t flags, struct dwmac_rk_lb_priv *lb_priv)
987 {
988     struct stmmac_priv *priv = netdev_priv(dev);
989     struct dma_desc *p;
990 
991     p = lb_priv->dma_tx;
992     p->des2 = 0;
993     lb_priv->tx_skbuff_dma = 0;
994     lb_priv->tx_skbuff_dma_len = 0;
995     lb_priv->tx_skbuff = NULL;
996 
997     lb_priv->rx_skbuff = NULL;
998     stmmac_init_rx_desc(priv, lb_priv->dma_rx, priv->use_riwt, priv->mode, true, lb_priv->dma_buf_sz);
999 
1000     stmmac_init_tx_desc(priv, lb_priv->dma_tx, priv->mode, true);
1001 
1002     return 0;
1003 }
1004 
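/* Allocate one coherent TX descriptor and one RX descriptor for the test path. */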
1005 static int dwmac_rk_alloc_dma_desc_resources(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
1006 {
1007     int ret = -ENOMEM;
1008 
1009     /* desc dma map */
1010     lb_priv->dma_rx = dma_alloc_coherent(priv->device, sizeof(struct dma_desc), &lb_priv->dma_rx_phy, GFP_KERNEL);
1011     if (!lb_priv->dma_rx) {
1012         return ret;
1013     }
1014 
1015     lb_priv->dma_tx = dma_alloc_coherent(priv->device, sizeof(struct dma_desc), &lb_priv->dma_tx_phy, GFP_KERNEL);
1016     if (!lb_priv->dma_tx) {
1017         dma_free_coherent(priv->device, sizeof(struct dma_desc), lb_priv->dma_rx, lb_priv->dma_rx_phy);
1018         return ret;
1019     }
1020 
1021     return 0;
1022 }
1023 
1024 static void dwmac_rk_free_dma_desc_resources(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
1025 {
1026     /* Release the DMA TX/RX socket buffers */
1027     dwmac_rk_dma_free_rx_skbufs(priv, lb_priv);
1028     dwmac_rk_dma_free_tx_skbufs(priv, lb_priv);
1029 
1030     dma_free_coherent(priv->device, sizeof(struct dma_desc), lb_priv->dma_tx, lb_priv->dma_tx_phy);
1031     dma_free_coherent(priv->device, sizeof(struct dma_desc), lb_priv->dma_rx, lb_priv->dma_rx_phy);
1032 }
1033 
1034 static int dwmac_rk_init_dma_engine(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
1035 {
1036     u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1037     u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
1038     u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
1039     u32 chan = 0;
1040     int ret = 0;
1041 
1042     ret = stmmac_reset(priv, priv->ioaddr);
1043     if (ret) {
1044         dev_err(priv->device, "Failed to reset the dma\n");
1045         return ret;
1046     }
1047 
1048     /* DMA Configuration */
1049     stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 0);
1050 
1051     if (priv->plat->axi) {
1052         stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
1053     }
1054 
1055     for (chan = 0; chan < dma_csr_ch; chan++) {
1056         stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 0);
1057     }
1058 
1059     /* DMA RX Channel Configuration */
1060     for (chan = 0; chan < rx_channels_count; chan++) {
1061         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, lb_priv->dma_rx_phy, 0);
1062 
1063         lb_priv->rx_tail_addr = lb_priv->dma_rx_phy + (1 * sizeof(struct dma_desc));
1064         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, lb_priv->rx_tail_addr, 0);
1065     }
1066 
1067     /* DMA TX Channel Configuration */
1068     for (chan = 0; chan < tx_channels_count; chan++) {
1069         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, lb_priv->dma_tx_phy, chan);
1070 
1071         lb_priv->tx_tail_addr = lb_priv->dma_tx_phy;
1072         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, lb_priv->tx_tail_addr, chan);
1073     }
1074 
1075     return ret;
1076 }
1077 
1078 static void dwmac_rk_dma_operation_mode(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
1079 {
1080     u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1081     u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
1082     int rxfifosz = priv->plat->rx_fifo_size;
1083     int txfifosz = priv->plat->tx_fifo_size;
1084     u32 txmode = SF_DMA_MODE;
1085     u32 rxmode = SF_DMA_MODE;
1086     u32 chan = 0;
1087     u8 qmode = 0;
1088 
1089     if (rxfifosz == 0) {
1090         rxfifosz = priv->dma_cap.rx_fifo_size;
1091     }
1092     if (txfifosz == 0) {
1093         txfifosz = priv->dma_cap.tx_fifo_size;
1094     }
1095 
1096     /* Adjust for real per queue fifo size */
1097     if (rx_channels_count == 0 || tx_channels_count == 0) {
1098         return;
1099     }
1100     rxfifosz /= rx_channels_count;
1101     txfifosz /= tx_channels_count;
1102 
1103     /* configure all channels */
1104     for (chan = 0; chan < rx_channels_count; chan++) {
1105         qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1106 
1107         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode);
1108         stmmac_set_dma_bfsize(priv, priv->ioaddr, lb_priv->dma_buf_sz, chan);
1109     }
1110 
1111     for (chan = 0; chan < tx_channels_count; chan++) {
1112         qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1113 
1114         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, qmode);
1115     }
1116 }
1117 
1118 static void dwmac_rk_rx_queue_dma_chan_map(struct stmmac_priv *priv)
1119 {
1120     u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1121     u32 queue;
1122     u32 chan;
1123 
1124     for (queue = 0; queue < rx_queues_count; queue++) {
1125         chan = priv->plat->rx_queues_cfg[queue].chan;
1126         stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
1127     }
1128 }
1129 
1130 static void dwmac_rk_mac_enable_rx_queues(struct stmmac_priv *priv)
1131 {
1132     u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1133     int queue;
1134     u8 mode;
1135 
1136     for (queue = 0; queue < rx_queues_count; queue++) {
1137         mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1138         stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1139     }
1140 }
1141 
1142 static void dwmac_rk_mtl_configuration(struct stmmac_priv *priv)
1143 {
1144     /* Map RX MTL to DMA channels */
1145     dwmac_rk_rx_queue_dma_chan_map(priv);
1146 
1147     /* Enable MAC RX Queues */
1148     dwmac_rk_mac_enable_rx_queues(priv);
1149 }
1150 
1151 static void dwmac_rk_mmc_setup(struct stmmac_priv *priv)
1152 {
1153     unsigned int mode =
1154         MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1155 
1156     stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
1157 
1158     if (priv->dma_cap.rmon) {
1159         stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
1160         memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1161     } else {
1162         netdev_info(priv->dev, "No MAC Management Counters available\n");
1163     }
1164 }
1165 
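/*
 * Bring up a minimal, single-descriptor MAC/DMA datapath for loopback
 * testing: allocate and initialise the descriptors, reset and configure the
 * DMA engine, program the MAC address, core, MTL, MMC and operation mode.
 */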
1166 static int dwmac_rk_init(struct net_device *dev, struct dwmac_rk_lb_priv *lb_priv)
1167 {
1168     struct stmmac_priv *priv = netdev_priv(dev);
1169     int ret;
1170     u32 mode;
1171 
1172     lb_priv->dma_buf_sz = 0x600; /* mtu 1500 size */
1173 
1174     if (priv->plat->has_gmac4) {
1175         lb_priv->buf_sz = priv->dma_cap.rx_fifo_size; /* rx fifo size */
1176     } else {
1177         lb_priv->buf_sz = 0x1000; /* rx fifo size */
1178     }
1179 
1180     ret = dwmac_rk_alloc_dma_desc_resources(priv, lb_priv);
1181     if (ret < 0) {
1182         pr_err("%s: DMA descriptors allocation failed\n", __func__);
1183         return ret;
1184     }
1185 
1186     ret = dwmac_rk_init_dma_desc_rings(dev, GFP_KERNEL, lb_priv);
1187     if (ret < 0) {
1188         pr_err("%s: DMA descriptors initialization failed\n", __func__);
1189         goto init_error;
1190     }
1191 
1192     /* DMA initialization and SW reset */
1193     ret = dwmac_rk_init_dma_engine(priv, lb_priv);
1194     if (ret < 0) {
1195         pr_err("%s: DMA engine initialization failed\n", __func__);
1196         goto init_error;
1197     }
1198 
1199     /* Copy the MAC addr into the HW  */
1200     priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1201 
1202     /* Initialize the MAC Core */
1203     stmmac_core_init(priv, priv->hw, dev);
1204 
1205     dwmac_rk_mtl_configuration(priv);
1206 
1207     dwmac_rk_mmc_setup(priv);
1208 
1209     ret = priv->hw->mac->rx_ipc(priv->hw);
1210     if (!ret) {
1211         pr_warn(" RX IPC Checksum Offload disabled\n");
1212         priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1213         priv->hw->rx_csum = 0;
1214     }
1215 
1216     /* Set the HW DMA mode and the COE */
1217     dwmac_rk_dma_operation_mode(priv, lb_priv);
1218 
1219     if (priv->plat->has_gmac4) {
1220         mode = readl(priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
1221         /* Disable OSP to get best performance */
1222         mode &= ~DMA_CONTROL_OSP;
1223         writel(mode, priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
1224     } else {
1225         /* Disable OSF */
1226         mode = readl(priv->ioaddr + DMA_CONTROL);
1227         writel((mode & ~DMA_CONTROL_OSF), priv->ioaddr + DMA_CONTROL);
1228     }
1229 
1230     stmmac_enable_dma_irq(priv, priv->ioaddr, 0, 1, 1);
1231 
1232     if (priv->hw->pcs) {
1233         stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
1234     }
1235 
1236     return 0;
1237 init_error:
1238     dwmac_rk_free_dma_desc_resources(priv, lb_priv);
1239 
1240     return ret;
1241 }
1242 
1243 static void dwmac_rk_release(struct net_device *dev, struct dwmac_rk_lb_priv *lb_priv)
1244 {
1245     struct stmmac_priv *priv = netdev_priv(dev);
1246 
1247     stmmac_disable_dma_irq(priv, priv->ioaddr, 0, 0, 0);
1248 
1249     /* Release and free the Rx/Tx resources */
1250     dwmac_rk_free_dma_desc_resources(priv, lb_priv);
1251 }
1252 
1253 static int dwmac_rk_get_max_delayline(struct stmmac_priv *priv)
1254 {
1255     if (of_device_is_compatible(priv->device->of_node, "rockchip,rk3588-gmac")) {
1256         return RK3588_MAX_DELAYLINE;
1257     } else {
1258         return MAX_DELAYLINE;
1259     }
1260 }
1261 
1262 static int dwmac_rk_phy_poll_reset(struct stmmac_priv *priv, int addr)
1263 {
1264     /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
1265     unsigned int val, retries = 12;
1266     int ret;
1267 
1268     val = mdiobus_read(priv->mii, addr, MII_BMCR);
1269     mdiobus_write(priv->mii, addr, MII_BMCR, val | BMCR_RESET);
1270 
1271     do {
1272         msleep(50);
1273         ret = mdiobus_read(priv->mii, addr, MII_BMCR);
1274         if (ret < 0) {
1275             return ret;
1276         }
1277     } while ((ret & BMCR_RESET) && --retries);
1278     if (ret & BMCR_RESET) {
1279         return -ETIMEDOUT;
1280     }
1281 
1282     msleep(1);
1283     return 0;
1284 }
1285 
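/*
 * Top-level test entry: if the interface is up, stop it and reset the PHY and
 * controller, enable the selected loopback, run either a single test frame or
 * a delayline scan, then tear everything down and restore the interface.
 */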
1286 static int dwmac_rk_loopback_run(struct stmmac_priv *priv, struct dwmac_rk_lb_priv *lb_priv)
1287 {
1288     struct net_device *ndev = priv->dev;
1289     int phy_iface = dwmac_rk_get_phy_interface(priv);
1290     int ndev_up, phy_addr;
1291     int ret = -EINVAL;
1292 
1293     if (!ndev || !priv->mii) {
1294         return -EINVAL;
1295     }
1296 
1297     phy_addr = priv->dev->phydev->mdio.addr;
1298     lb_priv->max_delay = dwmac_rk_get_max_delayline(priv);
1299 
1300     rtnl_lock();
1301     /* check whether the netdevice is up */
1302     ndev_up = ndev->flags & IFF_UP;
1303 
1304     if (ndev_up) {
1305         if (!netif_running(ndev) || !ndev->phydev) {
1306             rtnl_unlock();
1307             return -EINVAL;
1308         }
1309 
1310         /* check the negotiation status */
1311         if (ndev->phydev->state != PHY_NOLINK && ndev->phydev->state != PHY_RUNNING) {
1312             rtnl_unlock();
1313             pr_warn("Try again later, after negotiation done\n");
1314             return -EAGAIN;
1315         }
1316 
1317         ndev->netdev_ops->ndo_stop(ndev);
1318 
1319         if (priv->plat->stmmac_rst) {
1320             reset_control_assert(priv->plat->stmmac_rst);
1321         }
1322         dwmac_rk_phy_poll_reset(priv, phy_addr);
1323         if (priv->plat->stmmac_rst) {
1324             reset_control_deassert(priv->plat->stmmac_rst);
1325         }
1326     }
1327     /* wait for phy and controller ready */
1328     usleep_range(100000, 200000);
1329 
1330     dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed, true, phy_addr, true);
1331 
1332     ret = dwmac_rk_init(ndev, lb_priv);
1333     if (ret) {
1334         goto exit_init;
1335     }
1336 
1337     dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed, true, phy_addr, false);
1338 
1339     if (lb_priv->scan) {
1340         /* scan only support for rgmii mode */
1341         if (phy_iface != PHY_INTERFACE_MODE_RGMII && phy_iface != PHY_INTERFACE_MODE_RGMII_ID &&
1342             phy_iface != PHY_INTERFACE_MODE_RGMII_RXID && phy_iface != PHY_INTERFACE_MODE_RGMII_TXID) {
1343             ret = -EINVAL;
1344             goto out;
1345         }
1346         ret = dwmac_rk_loopback_delayline_scan(priv, lb_priv);
1347     } else {
1348         lb_priv->id++;
1349         lb_priv->tx = 0;
1350         lb_priv->rx = 0;
1351 
1352         lb_priv->packet = &dwmac_rk_tcp_attr;
1353         ret = dwmac_rk_loopback_run_first(priv, lb_priv);
1354     }
1355 
1356 out:
1357     dwmac_rk_release(ndev, lb_priv);
1358     dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed, false, phy_addr, false);
1359 exit_init:
1360     if (ndev_up) {
1361         ndev->netdev_ops->ndo_open(ndev);
1362     }
1363 
1364     rtnl_unlock();
1365 
1366     return ret;
1367 }
1368 
1369 static ssize_t rgmii_delayline_show(struct device *dev, struct device_attribute *attr, char *buf)
1370 {
1371     struct net_device *ndev = dev_get_drvdata(dev);
1372     struct stmmac_priv *priv = netdev_priv(ndev);
1373     int tx, rx;
1374 
1375     dwmac_rk_get_rgmii_delayline(priv, &tx, &rx);
1376 
1377     return sprintf(buf, "tx delayline: 0x%x, rx delayline: 0x%x\n", tx, rx);
1378 }
1379 
1380 static ssize_t rgmii_delayline_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1381 {
1382     struct net_device *ndev = dev_get_drvdata(dev);
1383     struct stmmac_priv *priv = netdev_priv(ndev);
1384     int tx = 0, rx = 0;
1385     char tmp[32];
1386     size_t buf_size = min(count, (sizeof(tmp) - 1));
1387     char *data;
1388 
1389     memset(tmp, 0, sizeof(tmp));
1390     strncpy(tmp, buf, buf_size);
1391 
1392     data = tmp;
1393     data = strstr(data, " ");
1394     if (!data) {
1395         goto out;
1396     }
1397     *data = 0;
1398     data++;
1399 
1400     if (kstrtoint(tmp, 0, &tx) || tx > dwmac_rk_get_max_delayline(priv)) {
1401         goto out;
1402     }
1403 
1404     if (kstrtoint(data, 0, &rx) || rx > dwmac_rk_get_max_delayline(priv)) {
1405         goto out;
1406     }
1407 
1408     dwmac_rk_set_rgmii_delayline(priv, tx, rx);
1409     pr_info("Set rgmii delayline tx: 0x%x, rx: 0x%x\n", tx, rx);
1410 
1411     return count;
1412 out:
1413     pr_err("wrong delayline value input, range is <0x0, 0x%x>\n", dwmac_rk_get_max_delayline(priv));
1414     pr_err("usage: <tx_delayline> <rx_delayline>\n");
1415 
1416     return count;
1417 }
1418 static DEVICE_ATTR_RW(rgmii_delayline);
1419 
1420 static ssize_t mac_lb_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1421 {
1422     struct net_device *ndev = dev_get_drvdata(dev);
1423     struct stmmac_priv *priv = netdev_priv(ndev);
1424     struct dwmac_rk_lb_priv *lb_priv;
1425     int ret, speed;
1426 
1427     lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1428     if (!lb_priv) {
1429         return -ENOMEM;
1430     }
1431 
1432     ret = kstrtoint(buf, 0, &speed);
1433     if (ret) {
1434         kfree(lb_priv);
1435         return count;
1436     }
1437     pr_info("MAC loopback speed set to %d\n", speed);
1438 
1439     lb_priv->sysfs = 1;
1440     lb_priv->type = LOOPBACK_TYPE_GMAC;
1441     lb_priv->speed = speed;
1442     lb_priv->scan = 0;
1443 
1444     ret = dwmac_rk_loopback_run(priv, lb_priv);
1445     kfree(lb_priv);
1446 
1447     if (!ret) {
1448         pr_info("MAC loopback: PASS\n");
1449     } else {
1450         pr_info("MAC loopback: FAIL\n");
1451     }
1452 
1453     return count;
1454 }
1455 static DEVICE_ATTR_WO(mac_lb);
1456 
1457 static ssize_t phy_lb_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1458 {
1459     struct net_device *ndev = dev_get_drvdata(dev);
1460     struct stmmac_priv *priv = netdev_priv(ndev);
1461     struct dwmac_rk_lb_priv *lb_priv;
1462     int ret, speed;
1463 
1464     lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1465     if (!lb_priv) {
1466         return -ENOMEM;
1467     }
1468 
1469     ret = kstrtoint(buf, 0, &speed);
1470     if (ret) {
1471         kfree(lb_priv);
1472         return count;
1473     }
1474     pr_info("PHY loopback speed set to %d\n", speed);
1475 
1476     lb_priv->sysfs = 1;
1477     lb_priv->type = LOOPBACK_TYPE_PHY;
1478     lb_priv->speed = speed;
1479     lb_priv->scan = 0;
1480 
1481     ret = dwmac_rk_loopback_run(priv, lb_priv);
1482     if (!ret) {
1483         pr_info("PHY loopback: PASS\n");
1484     } else {
1485         pr_info("PHY loopback: FAIL\n");
1486     }
1487 
1488     kfree(lb_priv);
1489     return count;
1490 }
1491 static DEVICE_ATTR_WO(phy_lb);
1492 
1493 static ssize_t phy_lb_scan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1494 {
1495     struct net_device *ndev = dev_get_drvdata(dev);
1496     struct stmmac_priv *priv = netdev_priv(ndev);
1497     struct dwmac_rk_lb_priv *lb_priv;
1498     int ret, speed;
1499 
1500     lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1501     if (!lb_priv) {
1502         return -ENOMEM;
1503     }
1504 
1505     ret = kstrtoint(buf, 0, &speed);
1506     if (ret) {
1507         kfree(lb_priv);
1508         return count;
1509     }
1510     pr_info("Delayline scan speed set to %d\n", speed);
1511 
1512     lb_priv->sysfs = 1;
1513     lb_priv->type = LOOPBACK_TYPE_PHY;
1514     lb_priv->speed = speed;
1515     lb_priv->scan = 1;
1516 
1517     dwmac_rk_loopback_run(priv, lb_priv);
1518 
1519     kfree(lb_priv);
1520     return count;
1521 }
1522 static DEVICE_ATTR_WO(phy_lb_scan);
1523 
1524 int dwmac_rk_create_loopback_sysfs(struct device *device)
1525 {
1526     int ret;
1527 
1528     ret = device_create_file(device, &dev_attr_rgmii_delayline);
1529     if (ret) {
1530         return ret;
1531     }
1532 
1533     ret = device_create_file(device, &dev_attr_mac_lb);
1534     if (ret) {
1535         goto remove_rgmii_delayline;
1536     }
1537 
1538     ret = device_create_file(device, &dev_attr_phy_lb);
1539     if (ret) {
1540         goto remove_mac_lb;
1541     }
1542 
1543     ret = device_create_file(device, &dev_attr_phy_lb_scan);
1544     if (ret) {
1545         goto remove_phy_lb;
1546     }
1547 
1548     return 0;
1549 
1550 remove_phy_lb:
1551     device_remove_file(device, &dev_attr_phy_lb);
1552 
1553 remove_mac_lb:
1554     device_remove_file(device, &dev_attr_mac_lb);
1555 
1556 remove_rgmii_delayline:
1557     device_remove_file(device, &dev_attr_rgmii_delayline);
1558 
1559     return ret;
1560 }
1561 
1562 int dwmac_rk_remove_loopback_sysfs(struct device *device)
1563 {
1564     device_remove_file(device, &dev_attr_rgmii_delayline);
1565     device_remove_file(device, &dev_attr_mac_lb);
1566     device_remove_file(device, &dev_attr_phy_lb);
1567     device_remove_file(device, &dev_attr_phy_lb_scan);
1568 
1569     return 0;
1570 }
1571