// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

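/* Poll a register until (value & mask) == expected, giving up after
 * RSWITCH_TIMEOUT_US.  The atomic poll variant is used, so callers may
 * hold spinlocks.
 */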
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
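/* Set up static forwarding: each ETHA port forwards only to the GWCA
 * (FWPBFC), with FWPBFCSDC selecting the per-port RX queue, while the
 * GWCA is allowed to forward to all ports.
 */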
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
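/* The agent clock must be running before a mode change is requested via
 * GWMC; it is gated again once the agent has been moved to DISABLE.
 */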
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

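/* Ring bookkeeping: "cur" is the next descriptor to use, "dirty" the
 * oldest not yet reclaimed; both advance modulo ring_size, so the extra
 * DT_LINKFIX terminator is never counted.
 */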
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

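/* Descriptors carry 40-bit DMA addresses: the low 32 bits in DPTRL and
 * the upper byte in DPTRH (matching the 40-bit DMA mask set at probe).
 */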
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

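/* Lay out a queue's descriptor ring: map RX buffers (or mark TX entries
 * empty), terminate the ring with a DT_LINKFIX descriptor pointing back
 * to its head, and advertise the ring through the linkfix table and the
 * queue's GWDCC register.
 */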
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

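/* (Re)arm num RX descriptors starting at start_index: map the skbs, tag
 * each entry with the port number in INFO1, and hand it to the hardware
 * as DT_FEMPTY.  DMA mappings are unwound on failure.
 */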
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

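/* Bring the GWCA from DISABLE through CONFIG into OPERATION: reset the
 * multicast table and AXI RAM, program the linkfix/timestamp ring base
 * addresses, and format every port's RX and TX queue.
 */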
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

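/* NAPI RX: reap filled descriptors up to the quota, attaching hardware
 * RX timestamps when enabled, then refill and re-arm the ring.  Returns
 * true when the budget was exhausted (more work may be pending).
 */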
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return false;
}

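/* Reclaim completed TX descriptors: unmap and free the skbs, mark the
 * entries DT_EEMPTY again and update the statistics.  With
 * free_txed_only, stop at the first descriptor still owned by hardware.
 */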
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

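/* NAPI poll: free transmitted skbs, run the RX loop, and re-enable the
 * queue's data interrupts only after napi_complete_done() accepts the
 * completion; bail out early if the GWCA has been halted.
 */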
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

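/* Drain the timestamp descriptor ring: match each completed entry's port
 * and tag against the pending ts_info list, deliver the timestamp with
 * skb_tstamp_tx(), then refill the ring.
 */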
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >>  8) & 0xFF;
	mac[1] = (mrmac0 >>  0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >>  8) & 0xFF;
	mac[5] = (mrmac1 >>  0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

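/* One Clause 45 MDIO transaction: the address phase is issued through
 * MPSM and acknowledged by MMIS1.PAACS; the following read or write is
 * acknowledged by PRACS or PWACS, with read data taken from MPSM.PRD.
 */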
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else if (rdev->serdes->power_count)
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

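/* ndo_open: start the PHY and NAPI, unmask this port's RX/TX data
 * interrupts, and unmask the shared timestamp interrupt when the first
 * port is opened.
 */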
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

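/* ndo_start_xmit: back-pressure when the ring is full, map the frame,
 * optionally register a TX-timestamp record, then publish the descriptor
 * as DT_FSINGLE and kick the queue through GWTRC.
 */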
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	netdev_tx_t ret = NETDEV_TX_OK;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto err_kfree;

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info)
			goto err_unmap;

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;

err_unmap:
	dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);

err_kfree:
	dev_kfree_skb_any(skb);

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1.
	 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
	 * both the numerator and the denominator by 10.
	 */
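	/* For example, assuming a hypothetical 320 MHz peripheral clock
	 * (illustration only, not a rate taken from the hardware manual):
	 * 320000000 / 100000 = 3200, so PSMCS = 3200 / (25 * 2) - 1 = 63.
	 */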
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}

static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");