1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Broadcom STB ASP 2.0 Driver
4 *
5 * Copyright (c) 2023 Broadcom
6 */
7#include <linux/etherdevice.h>
8#include <linux/if_vlan.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/of_platform.h>
17#include <linux/clk.h>
18
19#include "bcmasp.h"
20#include "bcmasp_intf_defs.h"
21
/* Unmask (enable) the given INTR2 bits in HW, then mirror the change in
 * the software mask cache so the driver knows which IRQs are live.
 */
static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
	priv->irq_mask &= ~mask;
}
27
/* Mask (disable) the given INTR2 bits in HW, then mirror the change in
 * the software mask cache.
 */
static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
	priv->irq_mask |= mask;
}
33
34void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
35{
36	struct bcmasp_priv *priv = intf->parent;
37
38	if (en)
39		_intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
40	else
41		_intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
42}
43EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
44
45void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
46{
47	struct bcmasp_priv *priv = intf->parent;
48
49	if (en)
50		_intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
51	else
52		_intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
53}
54EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
55
/* Mask every INTR2 interrupt source.
 *
 * _intr2_mask_set() already ORs the mask into priv->irq_mask, which for
 * 0xffffffff leaves the cache fully set — the previous explicit
 * reassignment of irq_mask afterwards was redundant and has been
 * dropped.
 */
static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
{
	_intr2_mask_set(priv, 0xffffffff);
}
61
/* Acknowledge (clear) any latched INTR2 status bits */
static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
{
	intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
}
66
/* Dispatch a latched INTR2 status word for one interface: for each of
 * the RX/TX sources that fired, mask the IRQ and hand processing off to
 * the corresponding NAPI context.
 */
static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
{
	if (status & ASP_INTR2_RX_ECH(intf->channel)) {
		/* napi_schedule_prep() guards against double scheduling;
		 * only mask the IRQ when we actually schedule NAPI.
		 */
		if (likely(napi_schedule_prep(&intf->rx_napi))) {
			bcmasp_enable_rx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->rx_napi);
		}
	}

	if (status & ASP_INTR2_TX_DESC(intf->channel)) {
		if (likely(napi_schedule_prep(&intf->tx_napi))) {
			bcmasp_enable_tx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->tx_napi);
		}
	}
}
83
/* Top-level INTR2 hard-IRQ handler. Reads the pending-and-unmasked
 * status bits, acks them, then lets every registered interface handle
 * its share of the status word.
 */
static irqreturn_t bcmasp_isr(int irq, void *data)
{
	struct bcmasp_priv *priv = data;
	struct bcmasp_intf *intf;
	u32 status;

	/* Only consider sources that are not masked out */
	status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
		~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);

	/* Ack everything we are about to handle */
	intr2_core_wl(priv, status, ASP_INTR2_CLEAR);

	if (unlikely(status == 0)) {
		dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
		return IRQ_NONE;
	}

	/* Handle interfaces */
	list_for_each_entry(intf, &priv->intfs, list)
		bcmasp_intr2_handling(intf, status);

	return IRQ_HANDLED;
}
106
107void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
108{
109	struct bcmasp_priv *priv = intf->parent;
110	u32 mask;
111
112	switch (intf->port) {
113	case 0:
114		mask = ASP_CTRL_UMAC0_FLUSH_MASK;
115		break;
116	case 1:
117		mask = ASP_CTRL_UMAC1_FLUSH_MASK;
118		break;
119	case 2:
120		mask = ASP_CTRL_SPB_FLUSH_MASK;
121		break;
122	default:
123		/* Not valid port */
124		return;
125	}
126
127	rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
128}
129
130static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
131				      struct bcmasp_net_filter *nfilt)
132{
133	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
134			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));
135
136	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
137			  ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
138			  ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
139			  ASP_RX_FILTER_NET_OFFSET_L4(32),
140			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
141
142	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
143			  ASP_RX_FILTER_NET_CFG_EN |
144			  ASP_RX_FILTER_NET_CFG_L2_EN |
145			  ASP_RX_FILTER_NET_CFG_L3_EN |
146			  ASP_RX_FILTER_NET_CFG_L4_EN |
147			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
148			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
149			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
150			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
151
152	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
153			  ASP_RX_FILTER_NET_CFG_EN |
154			  ASP_RX_FILTER_NET_CFG_L2_EN |
155			  ASP_RX_FILTER_NET_CFG_L3_EN |
156			  ASP_RX_FILTER_NET_CFG_L4_EN |
157			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
158			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
159			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
160			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
161}
162
/* Byte span covered by one wake filter pair (see
 * bcmasp_netfilt_wr_m_wake(), which bounds offset + size by this).
 */
#define MAX_WAKE_FILTER_SIZE		256

/* Selects between the pattern (match) and mask register banks */
enum asp_netfilt_reg_type {
	ASP_NETFILT_MATCH = 0,
	ASP_NETFILT_MASK,
	ASP_NETFILT_MAX
};
169
170static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
171					 struct bcmasp_net_filter *nfilt,
172					 enum asp_netfilt_reg_type reg_type,
173					 u32 offset)
174{
175	u32 block_index, filter_sel;
176
177	if (offset < 32) {
178		block_index = ASP_RX_FILTER_NET_L2;
179		filter_sel = nfilt->hw_index;
180	} else if (offset < 64) {
181		block_index = ASP_RX_FILTER_NET_L2;
182		filter_sel = nfilt->hw_index + 1;
183	} else if (offset < 96) {
184		block_index = ASP_RX_FILTER_NET_L3_0;
185		filter_sel = nfilt->hw_index;
186	} else if (offset < 128) {
187		block_index = ASP_RX_FILTER_NET_L3_0;
188		filter_sel = nfilt->hw_index + 1;
189	} else if (offset < 160) {
190		block_index = ASP_RX_FILTER_NET_L3_1;
191		filter_sel = nfilt->hw_index;
192	} else if (offset < 192) {
193		block_index = ASP_RX_FILTER_NET_L3_1;
194		filter_sel = nfilt->hw_index + 1;
195	} else if (offset < 224) {
196		block_index = ASP_RX_FILTER_NET_L4;
197		filter_sel = nfilt->hw_index;
198	} else if (offset < 256) {
199		block_index = ASP_RX_FILTER_NET_L4;
200		filter_sel = nfilt->hw_index + 1;
201	} else {
202		return -EINVAL;
203	}
204
205	switch (reg_type) {
206	case ASP_NETFILT_MATCH:
207		return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
208					     (offset % 32));
209	case ASP_NETFILT_MASK:
210		return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
211					      (offset % 32));
212	default:
213		return -EINVAL;
214	}
215}
216
217static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
218			      struct bcmasp_net_filter *nfilt,
219			      enum asp_netfilt_reg_type reg_type,
220			      u32 val, u32 offset)
221{
222	int reg_offset;
223
224	/* HW only accepts 4 byte aligned writes */
225	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
226		return;
227
228	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
229						   offset);
230
231	rx_filter_core_wl(priv, val, reg_offset);
232}
233
234static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
235			     struct bcmasp_net_filter *nfilt,
236			     enum asp_netfilt_reg_type reg_type,
237			     u32 offset)
238{
239	int reg_offset;
240
241	/* HW only accepts 4 byte aligned writes */
242	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
243		return 0;
244
245	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
246						   offset);
247
248	return rx_filter_core_rl(priv, reg_offset);
249}
250
/* Program @size bytes of @match/@mask data into a wake filter starting
 * at byte @offset, coalescing bytes into the 32-bit register writes the
 * HW requires. Bytes are packed big-endian within each register word
 * (byte at word offset 0 occupies the most significant bits).
 *
 * Returns 0 on success, -EINVAL if the span exceeds the filter window.
 */
static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
				    struct bcmasp_net_filter *nfilt,
				    u32 offset, void *match, void *mask,
				    size_t size)
{
	u32 shift, mask_val = 0, match_val = 0;
	bool first_byte = true;

	if ((offset + size) > MAX_WAKE_FILTER_SIZE)
		return -EINVAL;

	while (size--) {
		/* The HW only accepts 4 byte aligned writes, so if we
		 * begin unaligned or if remaining bytes less than 4,
		 * we need to read then write to avoid losing current
		 * register state
		 */
		if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
			match_val = bcmasp_netfilt_rd(priv, nfilt,
						      ASP_NETFILT_MATCH,
						      ALIGN_DOWN(offset, 4));
			mask_val = bcmasp_netfilt_rd(priv, nfilt,
						     ASP_NETFILT_MASK,
						     ALIGN_DOWN(offset, 4));
		}

		/* Clear the target byte lane, then merge the new byte */
		shift = (3 - (offset % 4)) * 8;
		match_val &= ~GENMASK(shift + 7, shift);
		mask_val &= ~GENMASK(shift + 7, shift);
		match_val |= (u32)(*((u8 *)match) << shift);
		mask_val |= (u32)(*((u8 *)mask) << shift);

		/* If last byte or last byte of word, write to reg */
		if (!size || ((offset % 4) == 3)) {
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
					  match_val, ALIGN_DOWN(offset, 4));
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
					  mask_val, ALIGN_DOWN(offset, 4));
			first_byte = true;
		} else {
			first_byte = false;
		}

		offset++;
		match++;
		mask++;
	}

	return 0;
}
301
302static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
303				    struct bcmasp_net_filter *nfilt)
304{
305	int i;
306
307	for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
308		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
309		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
310	}
311}
312
/* Program an ethtool TCP/UDP-over-IPv4 spec into a wake filter.
 *
 * Byte positions are frame-relative; @offset accounts for any VLAN tag
 * preceding the EtherType. Field offsets within the IP header (tos at
 * +1, saddr at +12, daddr at +16) assume a 20-byte IPv4 header, with
 * the L4 ports immediately following at +20/+22.
 */
static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip4_spec *match,
				     struct ethtool_tcpip4_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	/* EtherType must be IPv4 */
	val_16 = htons(ETH_P_IP);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
				 &match->tos, &mask->tos,
				 sizeof(match->tos));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
				 &match->ip4src, &mask->ip4src,
				 sizeof(match->ip4src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
				 &match->ip4dst, &mask->ip4dst,
				 sizeof(match->ip4dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
341
/* Program an ethtool TCP/UDP-over-IPv6 spec into a wake filter.
 *
 * Byte positions are frame-relative; @offset accounts for any VLAN tag.
 * Field offsets (saddr at +8, daddr at +24, ports at +40/+42) follow
 * the fixed 40-byte IPv6 header layout. The traffic class straddles two
 * nibbles of the first 16 bits, hence the << 4 when building the match.
 */
static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip6_spec *match,
				     struct ethtool_tcpip6_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	/* EtherType must be IPv6 */
	val_16 = htons(ETH_P_IPV6);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	val_16 = htons(match->tclass << 4);
	mask_16 = htons(mask->tclass << 4);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
				 &match->ip6src, &mask->ip6src,
				 sizeof(match->ip6src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
				 &match->ip6dst, &mask->ip6dst,
				 sizeof(match->ip6dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
371
/* Program a network filter's match/mask bytes into HW from its ethtool
 * flow spec, then enable the filter pair for wake-up.
 *
 * Byte layout follows the on-wire frame: DA, SA, optional VLAN tag,
 * EtherType, then L3/L4 headers. @offset shifts the post-MAC fields
 * when a VLAN tag is part of the match.
 *
 * Returns 0 on success, -EINVAL for non-wake filters (the only kind
 * currently supported).
 */
static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
				   struct bcmasp_net_filter *nfilt)
{
	struct ethtool_rx_flow_spec *fs = &nfilt->fs;
	unsigned int offset = 0;
	__be16 val_16, mask_16;
	u8 val_8, mask_8;

	/* Currently only supports wake filters */
	if (!nfilt->wake_filter)
		return -EINVAL;

	/* Start from a clean slate; HW may hold stale match data */
	bcmasp_netfilt_reset_hw(priv, nfilt);

	if (fs->flow_type & FLOW_MAC_EXT) {
		/* Destination MAC at frame offset 0 */
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
					 &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
		/* VLAN TPID/TCI sit right after the two MAC addresses */
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
					 &fs->h_ext.vlan_etype,
					 &fs->m_ext.vlan_etype,
					 sizeof(fs->h_ext.vlan_etype));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
					 &fs->h_ext.vlan_tci,
					 &fs->m_ext.vlan_tci,
					 sizeof(fs->h_ext.vlan_tci));
		/* Everything after the tag shifts by the tag length */
		offset += VLAN_HLEN;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));

		break;
	case IP_USER_FLOW:
		/* EtherType must be IPv4 */
		val_16 = htons(ETH_P_IP);
		mask_16 = htons(0xFFFF);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		/* Protocol field at IPv4 header offset 9 */
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
					 &val_8, &mask_8, sizeof(val_8));
		bcmasp_netfilt_wr_m_wake(priv, nfilt,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes)
					 );
		break;
	case TCP_V4_FLOW:
		/* Pin the IPv4 protocol field (header offset 9) to TCP */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V4_FLOW:
		/* Pin the IPv4 protocol field (header offset 9) to UDP */
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
					 &fs->m_u.udp_ip4_spec, offset);

		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case TCP_V6_FLOW:
		/* Pin the IPv6 next-header field (header offset 6) to TCP */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V6_FLOW:
		/* Pin the IPv6 next-header field (header offset 6) to UDP */
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
					 &fs->m_u.udp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	}

	bcmasp_netfilt_hw_en_wake(priv, nfilt);

	return 0;
}
496
/* Program all of this port's claimed network filters into HW before
 * suspending, and if at least one programmed successfully, enable the
 * top-level wake-up machinery.
 */
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	bool write = false;
	int ret, i;

	/* Write all filters to HW */
	for (i = 0; i < NUM_NET_FILTERS; i++) {
		/* If the filter does not match the port, skip programming. */
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		/* The odd half of a wake filter pair is programmed together
		 * with its even half; skip it here.
		 */
		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
		if (!ret)
			write = true;
	}

	/* Successfully programmed at least one wake filter
	 * so enable top level wake config
	 */
	if (write)
		rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
				  ASP_RX_FILTER_LNR_MD |
				  ASP_RX_FILTER_GEN_WK_EN |
				  ASP_RX_FILTER_NT_FLT_EN),
				  ASP_RX_FILTER_BLK_CTRL);
}
530
531int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
532				  u32 *rule_cnt)
533{
534	struct bcmasp_priv *priv = intf->parent;
535	int j = 0, i;
536
537	for (i = 0; i < NUM_NET_FILTERS; i++) {
538		if (!priv->net_filters[i].claimed ||
539		    priv->net_filters[i].port != intf->port)
540			continue;
541
542		if (i > 0 && (i % 2) &&
543		    priv->net_filters[i].wake_filter &&
544		    priv->net_filters[i - 1].wake_filter)
545			continue;
546
547		if (j == *rule_cnt)
548			return -EMSGSIZE;
549
550		rule_locs[j++] = priv->net_filters[i].fs.location;
551	}
552
553	*rule_cnt = j;
554
555	return 0;
556}
557
558int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
559{
560	struct bcmasp_priv *priv = intf->parent;
561	int cnt = 0, i;
562
563	for (i = 0; i < NUM_NET_FILTERS; i++) {
564		if (!priv->net_filters[i].claimed ||
565		    priv->net_filters[i].port != intf->port)
566			continue;
567
568		/* Skip over a wake filter pair */
569		if (i > 0 && (i % 2) &&
570		    priv->net_filters[i].wake_filter &&
571		    priv->net_filters[i - 1].wake_filter)
572			continue;
573
574		cnt++;
575	}
576
577	return cnt;
578}
579
/* Return true if a filter equivalent to @fs (same flow type, ring,
 * match/mask bytes and extension fields) is already claimed on this
 * port.
 */
bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
			      struct ethtool_rx_flow_spec *fs)
{
	struct bcmasp_priv *priv = intf->parent;
	struct ethtool_rx_flow_spec *cur;
	size_t fs_size = 0;
	int i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		cur = &priv->net_filters[i].fs;

		if (cur->flow_type != fs->flow_type ||
		    cur->ring_cookie != fs->ring_cookie)
			continue;

		/* Only compare the bytes of the h_u/m_u union that this
		 * flow type actually uses.
		 */
		switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
		case ETHER_FLOW:
			fs_size = sizeof(struct ethhdr);
			break;
		case IP_USER_FLOW:
			fs_size = sizeof(struct ethtool_usrip4_spec);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
		    memcmp(&cur->m_u, &fs->m_u, fs_size))
			continue;

		if (cur->flow_type & FLOW_EXT) {
			if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
			    cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
			    cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
			    cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
			    cur->h_ext.data[0] != fs->h_ext.data[0])
				continue;
		}
		if (cur->flow_type & FLOW_MAC_EXT) {
			if (memcmp(&cur->h_ext.h_dest,
				   &fs->h_ext.h_dest, ETH_ALEN) ||
			    memcmp(&cur->m_ext.h_dest,
				   &fs->m_ext.h_dest, ETH_ALEN))
				continue;
		}

		/* All relevant fields matched — duplicate found */
		return true;
	}

	return false;
}
643
644/* If no network filter found, return open filter.
645 * If no more open filters return NULL
646 */
647struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
648						  u32 loc, bool wake_filter,
649						  bool init)
650{
651	struct bcmasp_net_filter *nfilter = NULL;
652	struct bcmasp_priv *priv = intf->parent;
653	int i, open_index = -1;
654
655	/* Check whether we exceed the filter table capacity */
656	if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
657		return ERR_PTR(-EINVAL);
658
659	/* If the filter location is busy (already claimed) and we are initializing
660	 * the filter (insertion), return a busy error code.
661	 */
662	if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
663		return ERR_PTR(-EBUSY);
664
665	/* We need two filters for wake-up, so we cannot use an odd filter */
666	if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
667		return ERR_PTR(-EINVAL);
668
669	/* Initialize the loop index based on the desired location or from 0 */
670	i = loc == RX_CLS_LOC_ANY ? 0 : loc;
671
672	for ( ; i < NUM_NET_FILTERS; i++) {
673		/* Found matching network filter */
674		if (!init &&
675		    priv->net_filters[i].claimed &&
676		    priv->net_filters[i].hw_index == i &&
677		    priv->net_filters[i].port == intf->port)
678			return &priv->net_filters[i];
679
680		/* If we don't need a new filter or new filter already found */
681		if (!init || open_index >= 0)
682			continue;
683
684		/* Wake filter conslidates two filters to cover more bytes
685		 * Wake filter is open if...
686		 * 1. It is an even filter
687		 * 2. The current and next filter is not claimed
688		 */
689		if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
690		    !priv->net_filters[i + 1].claimed)
691			open_index = i;
692		else if (!priv->net_filters[i].claimed)
693			open_index = i;
694	}
695
696	if (open_index >= 0) {
697		nfilter = &priv->net_filters[open_index];
698		nfilter->claimed = true;
699		nfilter->port = intf->port;
700		nfilter->hw_index = open_index;
701	}
702
703	if (wake_filter && open_index >= 0) {
704		/* Claim next filter */
705		priv->net_filters[open_index + 1].claimed = true;
706		priv->net_filters[open_index + 1].wake_filter = true;
707		nfilter->wake_filter = true;
708	}
709
710	return nfilter ? nfilter : ERR_PTR(-EINVAL);
711}
712
713void bcmasp_netfilt_release(struct bcmasp_intf *intf,
714			    struct bcmasp_net_filter *nfilt)
715{
716	struct bcmasp_priv *priv = intf->parent;
717
718	if (nfilt->wake_filter) {
719		memset(&priv->net_filters[nfilt->hw_index + 1], 0,
720		       sizeof(struct bcmasp_net_filter));
721	}
722
723	memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
724}
725
726static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
727{
728	*high = (u32)(addr[0] << 8 | addr[1]);
729	*low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
730		     addr[5]);
731}
732
/* Record an address/mask pair in the software MDA filter table and
 * program it into the corresponding HW pattern/mask registers. Does not
 * enable the filter — see bcmasp_en_mda_filter().
 */
static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
				  const unsigned char *addr,
				  unsigned char *mask,
				  unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u32 addr_h, addr_l, mask_h, mask_l;

	/* Set local copy */
	ether_addr_copy(priv->mda_filters[i].mask, mask);
	ether_addr_copy(priv->mda_filters[i].addr, addr);

	/* Write to HW */
	bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
	bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
	rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
	rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
	rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
	rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
}
753
/* Enable or disable MDA filter @i and bind it to this interface's
 * channel and port, caching the enable state to skip redundant writes.
 */
static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
				 unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;

	/* Already in the requested state — nothing to write */
	if (priv->mda_filters[i].en == en)
		return;

	priv->mda_filters[i].en = en;
	priv->mda_filters[i].port = intf->port;

	/* NOTE(review): channel + 8 matches the +8 bias used for
	 * ASP_RX_FILTER_NET_CFG_CH in bcmasp_netfilt_hw_en_wake();
	 * presumably this maps the channel into the filter's output
	 * channel field — confirm against HW documentation.
	 */
	rx_filter_core_wl(priv, ((intf->channel + 8) |
			  (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
			  ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
			  ASP_RX_FILTER_MDA_CFG(i));
}
770
771/* There are 32 MDA filters shared between all ports, we reserve 4 filters per
772 * port for the following.
773 * - Promisc: Filter to allow all packets when promisc is enabled
774 * - All Multicast
775 * - Broadcast
776 * - Own address
777 *
 * The reserved filters are identified as follows.
779 * - Promisc: (index * 4) + 0
780 * - All Multicast: (index * 4) + 1
781 * - Broadcast: (index * 4) + 2
782 * - Own address: (index * 4) + 3
783 */
/* Per-interface reserved MDA filter slots, in table order */
enum asp_rx_filter_id {
	ASP_RX_FILTER_MDA_PROMISC = 0,
	ASP_RX_FILTER_MDA_ALLMULTI,
	ASP_RX_FILTER_MDA_BROADCAST,
	ASP_RX_FILTER_MDA_OWN_ADDR,
	ASP_RX_FILTER_MDA_RES_MAX,
};

/* Table index of the reserved MDA filter @name for interface @intf */
#define ASP_RX_FILT_MDA(intf, name)	(((intf)->index * \
					  ASP_RX_FILTER_MDA_RES_MAX) \
					 + ASP_RX_FILTER_MDA_##name)
795
/* Total number of MDA filters reserved across all registered
 * interfaces; general-purpose filters start at this index.
 */
static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
{
	return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
}
800
801void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
802{
803	unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
804	unsigned char promisc[ETH_ALEN];
805
806	eth_zero_addr(promisc);
807	/* Set mask to 00:00:00:00:00:00 to match all packets */
808	bcmasp_set_mda_filter(intf, promisc, promisc, i);
809	bcmasp_en_mda_filter(intf, en, i);
810}
811
812void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
813{
814	unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
815	unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);
816
817	/* Set mask to 01:00:00:00:00:00 to match all multicast */
818	bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
819	bcmasp_en_mda_filter(intf, en, i);
820}
821
822void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
823{
824	unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
825	unsigned char addr[ETH_ALEN];
826
827	eth_broadcast_addr(addr);
828	bcmasp_set_mda_filter(intf, addr, addr, i);
829	bcmasp_en_mda_filter(intf, en, i);
830}
831
832void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
833		      bool en)
834{
835	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
836	unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);
837
838	bcmasp_set_mda_filter(intf, addr, mask, i);
839	bcmasp_en_mda_filter(intf, en, i);
840}
841
842void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
843{
844	struct bcmasp_priv *priv = intf->parent;
845	unsigned int i;
846	int res_count;
847
848	res_count = bcmasp_total_res_mda_cnt(intf->parent);
849
850	/* Disable all filters held by this port */
851	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
852		if (priv->mda_filters[i].en &&
853		    priv->mda_filters[i].port == intf->port)
854			bcmasp_en_mda_filter(intf, 0, i);
855	}
856}
857
/* Try to merge a new address/mask pair into existing MDA filter @i.
 *
 * A set mask bit means "this address bit must match", so if one mask is
 * a subset of the other and the addresses agree on the common bits, the
 * filter with fewer mask bits is the broader one and already covers the
 * other.
 *
 * Returns 0 if the filters were combined (HW updated when the new
 * filter is the broader one), -EINVAL if they cannot be combined.
 */
static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
				     unsigned char *addr, unsigned char *mask,
				     int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u64 addr1, addr2, mask1, mask2, mask3;

	/* Switch to u64 to help with the calculations */
	addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
	mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
	addr2 = ether_addr_to_u64(addr);
	mask2 = ether_addr_to_u64(mask);

	/* Check if one filter resides within the other */
	mask3 = mask1 & mask2;
	if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
		/* Filter 2 resides within filter 1, so everything is good */
		return 0;
	} else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
		/* Filter 1 resides within filter 2, so swap filters */
		bcmasp_set_mda_filter(intf, addr, mask, i);
		return 0;
	}

	/* Unable to combine */
	return -EINVAL;
}
885
886int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
887			     unsigned char *mask)
888{
889	struct bcmasp_priv *priv = intf->parent;
890	int ret, res_count;
891	unsigned int i;
892
893	res_count = bcmasp_total_res_mda_cnt(intf->parent);
894
895	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
896		/* If filter not enabled or belongs to another port skip */
897		if (!priv->mda_filters[i].en ||
898		    priv->mda_filters[i].port != intf->port)
899			continue;
900
901		/* Attempt to combine filters */
902		ret = bcmasp_combine_set_filter(intf, addr, mask, i);
903		if (!ret) {
904			intf->mib.filters_combine_cnt++;
905			return 0;
906		}
907	}
908
909	/* Create new filter if possible */
910	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
911		if (priv->mda_filters[i].en)
912			continue;
913
914		bcmasp_set_mda_filter(intf, addr, mask, i);
915		bcmasp_en_mda_filter(intf, 1, i);
916		return 0;
917	}
918
919	/* No room for new filter */
920	return -EINVAL;
921}
922
/* Reset all RX filter state (MDA and network filters) in HW and in the
 * software cache, then re-arm the top-level filter block.
 */
static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
{
	unsigned int i;

	/* Disable all filters and reset software view since the HW
	 * can lose context while in deep sleep suspend states
	 */
	for (i = 0; i < NUM_MDA_FILTERS; i++) {
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
		priv->mda_filters[i].en = 0;
	}

	for (i = 0; i < NUM_NET_FILTERS; i++)
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));

	/* Top level filter enable bit should be enabled at all times, set
	 * GEN_WAKE_CLEAR to clear the network filter wake-up which would
	 * otherwise be sticky
	 */
	rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
			  ASP_RX_FILTER_MDA_EN |
			  ASP_RX_FILTER_GEN_WK_CLR |
			  ASP_RX_FILTER_NT_FLT_EN),
			  ASP_RX_FILTER_BLK_CTRL);
}
948
/* ASP core initialization: analytics blocks, extracted-packet (EDPKT)
 * engine configuration, and quiescing of the UniMAC wake interrupts.
 */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
	/* Analytics engine setup; register values are hardware-defined */
	tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
	rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);

	/* 128-byte extracted headers (per the HDR_SZ_128 macro) */
	rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
			 ASP_EDPKT_HDR_CFG);
	/* Descriptor endianness selection */
	rx_edpkt_core_wl(priv,
			 (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
			 ASP_EDPKT_ENDI);

	/* Burst buffer timeouts — hardware-tuned values, units are not
	 * visible here (see HW documentation).
	 */
	rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);

	rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);

	/* Disable and clear both UniMAC's wake-up interrupts to avoid
	 * sticky interrupts.
	 */
	_intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
	intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
		      ASP_INTR2_CLEAR);
}
974
/* Select between the main core clock (@slow == false) and the slow
 * clock via a read-modify-write of the clock-select register.
 */
static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
{
	u32 reg;

	reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
	if (slow)
		reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
	else
		reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
	ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
986
/* Lockless clock-control update: clear @clr then set @set bits in the
 * clock control register, mirroring the same change into SCRATCH_0
 * (which bcmasp_core_clock_set_intf() reads back as the current state).
 * Callers must hold priv->clk_lock.
 */
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	u32 reg;

	reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);

	reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
}
1001
/* Locked wrapper around bcmasp_core_clock_set_ll() */
static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->clk_lock, flags);
	bcmasp_core_clock_set_ll(priv, clr, set);
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1010
/* Gate or ungate the per-port RGMII clock for @intf, together with the
 * shared RX/TX clocks when appropriate.
 */
void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
{
	u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
	struct bcmasp_priv *priv = intf->parent;
	unsigned long flags;
	u32 reg;

	/* When enabling an interface, if the RX or TX clocks were not enabled,
	 * enable them. Conversely, while disabling an interface, if this is
	 * the last one enabled, we can turn off the shared RX and TX clocks as
	 * well. We control enable bits which is why we test for equality on
	 * the RGMII clock bit mask.
	 */
	spin_lock_irqsave(&priv->clk_lock, flags);
	if (en) {
		intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
			     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, intf_mask, 0);
	} else {
		/* SCRATCH_0 mirrors the clock-control state (see
		 * bcmasp_core_clock_set_ll()); with this port's bit added,
		 * all-disabled means no port still needs the shared clocks.
		 */
		reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
		if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
		    ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
			intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
				     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, 0, intf_mask);
	}
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1039
1040static irqreturn_t bcmasp_isr_wol(int irq, void *data)
1041{
1042	struct bcmasp_priv *priv = data;
1043	u32 status;
1044
1045	/* No L3 IRQ, so we good */
1046	if (priv->wol_irq <= 0)
1047		goto irq_handled;
1048
1049	status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
1050		~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
1051	wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
1052
1053irq_handled:
1054	pm_wakeup_event(&priv->pdev->dev, 0);
1055	return IRQ_HANDLED;
1056}
1057
1058static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
1059{
1060	struct platform_device *pdev = priv->pdev;
1061	int irq, ret;
1062
1063	irq = platform_get_irq_optional(pdev, i);
1064	if (irq < 0)
1065		return irq;
1066
1067	ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
1068			       pdev->name, priv);
1069	if (ret)
1070		return ret;
1071
1072	return irq;
1073}
1074
1075static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
1076{
1077	struct platform_device *pdev = priv->pdev;
1078	struct device *dev = &pdev->dev;
1079	int irq;
1080
1081	irq = bcmasp_get_and_request_irq(priv, 1);
1082	if (irq < 0) {
1083		dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
1084		return;
1085	}
1086
1087	priv->wol_irq = irq;
1088	priv->wol_irq_enabled_mask = 0;
1089	device_set_wakeup_capable(&pdev->dev, 1);
1090}
1091
1092static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
1093{
1094	struct bcmasp_priv *priv = intf->parent;
1095	struct device *dev = &priv->pdev->dev;
1096
1097	if (en) {
1098		if (priv->wol_irq_enabled_mask) {
1099			set_bit(intf->port, &priv->wol_irq_enabled_mask);
1100			return;
1101		}
1102
1103		/* First enable */
1104		set_bit(intf->port, &priv->wol_irq_enabled_mask);
1105		enable_irq_wake(priv->wol_irq);
1106		device_set_wakeup_enable(dev, 1);
1107	} else {
1108		if (!priv->wol_irq_enabled_mask)
1109			return;
1110
1111		clear_bit(intf->port, &priv->wol_irq_enabled_mask);
1112		if (priv->wol_irq_enabled_mask)
1113			return;
1114
1115		/* Last disable */
1116		disable_irq_wake(priv->wol_irq);
1117		device_set_wakeup_enable(dev, 0);
1118	}
1119}
1120
1121static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
1122{
1123	if (priv->wol_irq > 0)
1124		free_irq(priv->wol_irq, priv);
1125}
1126
1127static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
1128{
1129	struct platform_device *pdev = priv->pdev;
1130	struct device *dev = &pdev->dev;
1131	struct bcmasp_intf *intf;
1132	int irq;
1133
1134	list_for_each_entry(intf, &priv->intfs, list) {
1135		irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
1136		if (irq < 0) {
1137			dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
1138				 intf->port, irq);
1139			continue;
1140		}
1141
1142		intf->wol_irq = irq;
1143		intf->wol_irq_enabled = false;
1144		device_set_wakeup_capable(&pdev->dev, 1);
1145	}
1146}
1147
1148static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
1149{
1150	struct device *dev = &intf->parent->pdev->dev;
1151
1152	if (en ^ intf->wol_irq_enabled)
1153		irq_set_irq_wake(intf->wol_irq, en);
1154
1155	intf->wol_irq_enabled = en;
1156	device_set_wakeup_enable(dev, en);
1157}
1158
1159static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
1160{
1161	struct bcmasp_intf *intf;
1162
1163	list_for_each_entry(intf, &priv->intfs, list) {
1164		if (intf->wol_irq > 0)
1165			free_irq(intf->wol_irq, priv);
1166	}
1167}
1168
/* Register offsets that differ between ASP silicon revisions: v2.0 layout */
static struct bcmasp_hw_info v20_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
	.umac2fb = UMAC2FB_OFFSET,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
	.rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
};
1176
/* ASP 2.0: one dedicated wake IRQ per port */
static const struct bcmasp_plat_data v20_plat_data = {
	.init_wol = bcmasp_init_wol_per_intf,
	.enable_wol = bcmasp_enable_wol_per_intf,
	.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
	.hw_info = &v20_hw_info,
};
1183
/* Register offsets that differ between ASP silicon revisions: v2.1 layout */
static struct bcmasp_hw_info v21_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
	.umac2fb = UMAC2FB_OFFSET_2_1,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_filt_out_frame_count =
		ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
};
1192
/* ASP 2.1: a single wake IRQ shared by all ports */
static const struct bcmasp_plat_data v21_plat_data = {
	.init_wol = bcmasp_init_wol_shared,
	.enable_wol = bcmasp_enable_wol_shared,
	.destroy_wol = bcmasp_wol_irq_destroy_shared,
	.hw_info = &v21_hw_info,
};
1199
/* Device-tree match table: selects the revision-specific plat_data */
static const struct of_device_id bcmasp_of_match[] = {
	{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
	{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
1206
/* MDIO child nodes populated as platform devices from probe */
static const struct of_device_id bcmasp_mdio_of_match[] = {
	{ .compatible = "brcm,asp-v2.1-mdio", },
	{ .compatible = "brcm,asp-v2.0-mdio", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
1213
1214static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
1215{
1216	struct bcmasp_intf *intf, *n;
1217
1218	list_for_each_entry_safe(intf, n, &priv->intfs, list) {
1219		list_del(&intf->list);
1220		bcmasp_interface_destroy(intf);
1221	}
1222}
1223
1224static int bcmasp_probe(struct platform_device *pdev)
1225{
1226	struct device_node *ports_node, *intf_node;
1227	const struct bcmasp_plat_data *pdata;
1228	struct device *dev = &pdev->dev;
1229	struct bcmasp_priv *priv;
1230	struct bcmasp_intf *intf;
1231	int ret = 0, count = 0;
1232	unsigned int i;
1233
1234	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1235	if (!priv)
1236		return -ENOMEM;
1237
1238	priv->irq = platform_get_irq(pdev, 0);
1239	if (priv->irq <= 0)
1240		return -EINVAL;
1241
1242	priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
1243	if (IS_ERR(priv->clk))
1244		return dev_err_probe(dev, PTR_ERR(priv->clk),
1245				     "failed to request clock\n");
1246
1247	/* Base from parent node */
1248	priv->base = devm_platform_ioremap_resource(pdev, 0);
1249	if (IS_ERR(priv->base))
1250		return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
1251
1252	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1253	if (ret)
1254		return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
1255
1256	dev_set_drvdata(&pdev->dev, priv);
1257	priv->pdev = pdev;
1258	spin_lock_init(&priv->mda_lock);
1259	spin_lock_init(&priv->clk_lock);
1260	mutex_init(&priv->wol_lock);
1261	mutex_init(&priv->net_lock);
1262	INIT_LIST_HEAD(&priv->intfs);
1263
1264	pdata = device_get_match_data(&pdev->dev);
1265	if (!pdata)
1266		return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
1267
1268	priv->init_wol = pdata->init_wol;
1269	priv->enable_wol = pdata->enable_wol;
1270	priv->destroy_wol = pdata->destroy_wol;
1271	priv->hw_info = pdata->hw_info;
1272
1273	/* Enable all clocks to ensure successful probing */
1274	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
1275
1276	/* Switch to the main clock */
1277	bcmasp_core_clock_select(priv, false);
1278
1279	bcmasp_intr2_mask_set_all(priv);
1280	bcmasp_intr2_clear_all(priv);
1281
1282	ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
1283			       pdev->name, priv);
1284	if (ret)
1285		return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
1286
1287	/* Register mdio child nodes */
1288	of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
1289
1290	/* ASP specific initialization, Needs to be done regardless of
1291	 * how many interfaces come up.
1292	 */
1293	bcmasp_core_init(priv);
1294	bcmasp_core_init_filters(priv);
1295
1296	ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
1297	if (!ports_node) {
1298		dev_warn(dev, "No ports found\n");
1299		return -EINVAL;
1300	}
1301
1302	i = 0;
1303	for_each_available_child_of_node(ports_node, intf_node) {
1304		intf = bcmasp_interface_create(priv, intf_node, i);
1305		if (!intf) {
1306			dev_err(dev, "Cannot create eth interface %d\n", i);
1307			bcmasp_remove_intfs(priv);
1308			of_node_put(intf_node);
1309			goto of_put_exit;
1310		}
1311		list_add_tail(&intf->list, &priv->intfs);
1312		i++;
1313	}
1314
1315	/* Check and enable WoL */
1316	priv->init_wol(priv);
1317
1318	/* Drop the clock reference count now and let ndo_open()/ndo_close()
1319	 * manage it for us from now on.
1320	 */
1321	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
1322
1323	clk_disable_unprepare(priv->clk);
1324
1325	/* Now do the registration of the network ports which will take care
1326	 * of managing the clock properly.
1327	 */
1328	list_for_each_entry(intf, &priv->intfs, list) {
1329		ret = register_netdev(intf->ndev);
1330		if (ret) {
1331			netdev_err(intf->ndev,
1332				   "failed to register net_device: %d\n", ret);
1333			priv->destroy_wol(priv);
1334			bcmasp_remove_intfs(priv);
1335			goto of_put_exit;
1336		}
1337		count++;
1338	}
1339
1340	dev_info(dev, "Initialized %d port(s)\n", count);
1341
1342of_put_exit:
1343	of_node_put(ports_node);
1344	return ret;
1345}
1346
1347static int bcmasp_remove(struct platform_device *pdev)
1348{
1349	struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
1350
1351	if (!priv)
1352		return 0;
1353
1354	priv->destroy_wol(priv);
1355	bcmasp_remove_intfs(priv);
1356
1357	return 0;
1358}
1359
/* Shutdown performs the same teardown as removal */
static void bcmasp_shutdown(struct platform_device *pdev)
{
	bcmasp_remove(pdev);
}
1364
1365static int __maybe_unused bcmasp_suspend(struct device *d)
1366{
1367	struct bcmasp_priv *priv = dev_get_drvdata(d);
1368	struct bcmasp_intf *intf;
1369	int ret;
1370
1371	list_for_each_entry(intf, &priv->intfs, list) {
1372		ret = bcmasp_interface_suspend(intf);
1373		if (ret)
1374			break;
1375	}
1376
1377	ret = clk_prepare_enable(priv->clk);
1378	if (ret)
1379		return ret;
1380
1381	/* Whether Wake-on-LAN is enabled or not, we can always disable
1382	 * the shared TX clock
1383	 */
1384	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
1385
1386	bcmasp_core_clock_select(priv, true);
1387
1388	clk_disable_unprepare(priv->clk);
1389
1390	return ret;
1391}
1392
1393static int __maybe_unused bcmasp_resume(struct device *d)
1394{
1395	struct bcmasp_priv *priv = dev_get_drvdata(d);
1396	struct bcmasp_intf *intf;
1397	int ret;
1398
1399	ret = clk_prepare_enable(priv->clk);
1400	if (ret)
1401		return ret;
1402
1403	/* Switch to the main clock domain */
1404	bcmasp_core_clock_select(priv, false);
1405
1406	/* Re-enable all clocks for re-initialization */
1407	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
1408
1409	bcmasp_core_init(priv);
1410	bcmasp_core_init_filters(priv);
1411
1412	/* And disable them to let the network devices take care of them */
1413	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
1414
1415	clk_disable_unprepare(priv->clk);
1416
1417	list_for_each_entry(intf, &priv->intfs, list) {
1418		ret = bcmasp_interface_resume(intf);
1419		if (ret)
1420			break;
1421	}
1422
1423	return ret;
1424}
1425
/* Power-management ops: suspend/resume only, no runtime PM */
static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
			 bcmasp_suspend, bcmasp_resume);

static struct platform_driver bcmasp_driver = {
	.probe = bcmasp_probe,
	.remove = bcmasp_remove,
	.shutdown = bcmasp_shutdown,
	.driver = {
		.name = "brcm,asp-v2",
		.of_match_table = bcmasp_of_match,
		.pm = &bcmasp_pm_ops,
	},
};
module_platform_driver(bcmasp_driver);

MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
MODULE_ALIAS("platform:brcm,asp-v2");
MODULE_LICENSE("GPL");
1444