// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
 *
 * Authors: Quentin Schulz & Antoine Tenart
 * License: Dual MIT/GPL
 * Copyright (c) 2020 Microsemi Corporation
 */

#include <linux/gpio/consumer.h>
#include <linux/ip.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/udp.h>
#include <asm/unaligned.h>

#include "mscc.h"
#include "mscc_ptp.h"

/* Two PHYs share the same 1588 processor and it has to be entirely
 * configured through the base PHY of this processor.
 */
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
			       val);
}

/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
}

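/* Hardware block IDs used in the BIU address register when accessing the
 * 1588 CSRs. This driver uses ingress/egress engine 0 and processor 0 for
 * the base PHY of a pair and engine/processor 1 for the other PHY; engine 2
 * is left unused here.
 */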
enum ts_blk_hw {
	INGRESS_ENGINE_0,
	EGRESS_ENGINE_0,
	INGRESS_ENGINE_1,
	EGRESS_ENGINE_1,
	INGRESS_ENGINE_2,
	EGRESS_ENGINE_2,
	PROCESSOR_0,
	PROCESSOR_1,
};

enum ts_blk {
	INGRESS,
	EGRESS,
	PROCESSOR,
};

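/* Read a 32-bit CSR of the given analyzer block through the BIU indirection:
 * issue a read command with the block ID and CSR address, poll for
 * completion, then collect the value from the MSB/LSB data registers.
 */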
static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
			       u16 addr)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 val, cnt = 0;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
	val <<= 16;
	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	phy_unlock_mdio_bus(phydev);

	return val;
}

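/* Write a 32-bit CSR of the given analyzer block through the BIU indirection.
 * For a few PROCESSOR registers (LTC control, interrupt mask and status) the
 * MSB half is only written when it is non-zero, in which case the bypass
 * control register saved on entry is also restored once the write completed.
 */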
static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
				 u16 addr, u32 val)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
		    blk == PROCESSOR;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	if (!cond || (cond && upper))
		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);

	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	if (cond && upper)
		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);

	phy_unlock_mdio_bus(phydev);
}

/* Pick bytes from PTP header */
#define PTP_HEADER_TRNSP_MSG		26
#define PTP_HEADER_DOMAIN_NUM		25
#define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
#define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))

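/* Program the egress Frame Signature Builder (FSB). The 16-byte signature is
 * made of the PTP sequence ID, domain number, message type and destination
 * MAC address, padded with the message type selector; each FSB register
 * packs five 6-bit byte selectors. get_sig() builds the matching signature
 * from an skb.
 */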
static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
{
	u8 sig_sel[16] = {};
	signed char i, pos = 0;

	/* Seq ID is 2B long and starts at 30th byte */
	for (i = 1; i >= 0; i--)
		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);

	/* DomainNum */
	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;

	/* MsgType */
	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;

	/* MAC address is 6B long */
	for (i = ETH_ALEN - 1; i >= 0; i--)
		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (; pos < ARRAY_SIZE(sig_sel); pos++)
		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;

	for (i = 0; i <= 2; i++) {
		u32 val = 0;

		for (pos = i * 5 + 4; pos >= i * 5; pos--)
			val = (val << 6) | sig_sel[pos];

		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
				     val);
	}

	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
			     sig_sel[15]);

	return 0;
}

static const u32 vsc85xx_egr_latency[] = {
	/* Copper Egress */
	1272, /* 1000Mbps */
	12516, /* 100Mbps */
	125444, /* 10Mbps */
	/* Fiber Egress */
	1277, /* 1000Mbps */
	12537, /* 100Mbps */
};

static const u32 vsc85xx_egr_latency_macsec[] = {
	/* Copper Egress ON */
	3496, /* 1000Mbps */
	34760, /* 100Mbps */
	347844, /* 10Mbps */
	/* Fiber Egress ON */
	3502, /* 1000Mbps */
	34780, /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency[] = {
	/* Copper Ingress */
	208, /* 1000Mbps */
	304, /* 100Mbps */
	2023, /* 10Mbps */
	/* Fiber Ingress */
	98, /* 1000Mbps */
	197, /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency_macsec[] = {
	/* Copper Ingress */
	2408, /* 1000Mbps */
	22300, /* 100Mbps */
	222009, /* 10Mbps */
	/* Fiber Ingress */
	2299, /* 1000Mbps */
	22192, /* 100Mbps */
};

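/* Program the per-speed ingress/egress latency compensation picked from the
 * tables above (index 0: 1000Mbps, 1: 100Mbps, 2: 10Mbps) and ask the
 * timestamp processors to reload their delays. Called on link changes.
 */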
static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
{
	u32 val, ingr_latency, egr_latency;
	u8 idx;

	/* No need to set latencies of packets if the PHY is not connected */
	if (!phydev->link)
		return;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
			     STALL_EGR_LATENCY(phydev->speed));

	switch (phydev->speed) {
	case SPEED_100:
		idx = 1;
		break;
	case SPEED_1000:
		idx = 0;
		break;
	default:
		idx = 2;
		break;
	}

	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
			     PTP_INGR_LOCAL_LATENCY(ingr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
			     PTP_EGR_LOCAL_LATENCY(egr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
}

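/* Bring all comparator flows of a block (IP1/IP2, ETH1/ETH2, MPLS, PTP, OAM)
 * back to a known, disabled state before they get reconfigured.
 */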
static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
{
	u8 i;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);

	for (i = 0; i < COMP_MAX_FLOWS; i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);

		if (i >= PTP_COMP_MAX_FLOWS)
			continue;

		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
				     0);
	}

	return 0;
}

static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
	val |= ANA_FSB_ADDR_FROM_ETH1;
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);

	return 0;
}

static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
						struct iphdr *iphdr,
						struct udphdr *udphdr)
{
	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
		return NULL;

	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
}

static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
{
	struct ethhdr *ethhdr = eth_hdr(skb);
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (ethhdr->h_proto == htons(ETH_P_1588))
		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
						 skb_mac_header_len(skb));

	if (ethhdr->h_proto != htons(ETH_P_IP))
		return NULL;

	iphdr = ip_hdr(skb);
	udphdr = udp_hdr(skb);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
						enum hwtstamp_rx_filters rx_filter)
{
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
		return (struct vsc85xx_ptphdr *)skb->data;

	iphdr = (struct iphdr *)skb->data;
	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

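/* Build, from an skb, the same 16-byte signature the egress FSB stores in the
 * timestamp FIFO (see vsc85xx_ts_fsb_init()), so queued TX skbs can be
 * matched against FIFO entries.
 */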
static int get_sig(struct sk_buff *skb, u8 *sig)
{
	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
	struct ethhdr *ethhdr = eth_hdr(skb);
	unsigned int i;

	if (!ptphdr)
		return -EOPNOTSUPP;

	sig[0] = (__force u16)ptphdr->seq_id >> 8;
	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
	sig[2] = ptphdr->domain;
	sig[3] = ptphdr->tsmt & GENMASK(3, 0);

	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (i = 10; i < 16; i++)
		sig[i] = ptphdr->tsmt & GENMASK(3, 0);

	return 0;
}

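/* Pop one entry from the egress timestamp FIFO and try to match it against
 * the queued TX skbs: on a signature match the timestamp is delivered through
 * skb_complete_tx_timestamp(), otherwise the skb is re-queued for a later
 * FIFO entry. Called with ts_lock held from the interrupt handler.
 */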
static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct vsc85xx_ts_fifo fifo;
	struct sk_buff *skb;
	u8 skb_sig[16], *p;
	int i, len;
	u32 reg;

	memset(&fifo, 0, sizeof(fifo));
	p = (u8 *)&fifo;

	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
	if (reg & PTP_EGR_TS_FIFO_EMPTY)
		return;

	*p++ = reg & 0xff;
	*p++ = (reg >> 8) & 0xff;

	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
	for (i = 1; i < 7; i++) {
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
		*p++ = reg & 0xff;
		*p++ = (reg >> 8) & 0xff;
		*p++ = (reg >> 16) & 0xff;
		*p++ = (reg >> 24) & 0xff;
	}

	len = skb_queue_len(&ptp->tx_queue);
	if (len < 1)
		return;

	while (len--) {
		skb = __skb_dequeue(&ptp->tx_queue);
		if (!skb)
			return;

		/* Can't get the signature of the packet, won't ever
		 * be able to have one so let's dequeue the packet.
		 */
		if (get_sig(skb, skb_sig) < 0) {
			kfree_skb(skb);
			continue;
		}

		/* Check if we found the signature we were looking for. */
		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
			skb_complete_tx_timestamp(skb, &shhwtstamps);

			return;
		}

		/* Valid signature but does not match the one of the
		 * packet in the FIFO right now, reschedule it for later
		 * packets.
		 */
		__skb_queue_tail(&ptp->tx_queue, skb);
	}
}

static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
{
	u32 reg;

	do {
		vsc85xx_dequeue_skb(ptp);

		/* If other timestamps are available in the FIFO, process them. */
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
}

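/* PTP comparator: flows 0 and 1 match Sync and Delay_Req messages on any
 * domain, keyed on the message type field only.
 */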
static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	enum vsc85xx_ptp_msg_type msgs[] = {
		PTP_MSG_TYPE_SYNC,
		PTP_MSG_TYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     base ? PTP_FLOW_VALID_CH0 :
				     PTP_FLOW_VALID_CH1);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
				     msgs[i] << 24);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
				     PTP_FLOW_MSG_TYPE_MASK);
	}

	return 0;
}

static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     val);

	return 0;
}

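/* IP1 comparator: match PTP event messages on their UDP destination port
 * (PTP_EV_PORT), with any source port and any IP address.
 */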
static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
			     PTP_EV_PORT);
	/* Match on dest port only, ignore src */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
			     0xffff);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	/* Match all IPs */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);

	return 0;
}

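/* Frequency adjustment: the LTC auto-adjust block adds or subtracts 1 ns
 * every 'adj' nanoseconds, with 'adj' derived from the requested scaled ppm.
 */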
static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u64 adj = 0;
	u32 val;

	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
		return 0;

	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
	if (adj > 1000000000L)
		adj = 1000000000L;

	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;

	mutex_lock(&priv->phc_lock);

	/* Update the ppb value, in nanoseconds, in the auto adjust register. */
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
			     val);

	/* The auto adjust update val is set to 0 after the write operation. */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

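/* Latch the Local Time Counter into the SAVED_* registers on a rising edge of
 * the shared LOAD_SAVE GPIO and read it back. Callers hold phc_lock; the GPIO
 * itself is protected by the package-wide gpio_lock.
 */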
static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_SAVE_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);

	ts->tv_sec = ((time64_t)val) << 32;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
	ts->tv_sec += val;

	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
					  MSCC_PHY_PTP_LTC_SAVED_NS);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_gettime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_settime(struct ptp_clock_info *info,
			     const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
			     PTP_LTC_LOAD_NS(ts->tv_nsec));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val &= ~PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_settime(struct ptp_clock_info *info,
			   const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_settime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	/* Can't recover that big of an offset. Let's set the time directly. */
	if (abs(delta) >= NSEC_PER_SEC) {
		struct timespec64 ts;
		u64 now;

		mutex_lock(&priv->phc_lock);

		__vsc85xx_gettime(info, &ts);
		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);
		__vsc85xx_settime(info, &ts);

		mutex_unlock(&priv->phc_lock);

		return 0;
	}

	mutex_lock(&priv->phc_lock);

	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
	if (delta > 0)
		val |= PTP_LTC_OFFSET_ADD;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				  u32 next_comp, u32 etype)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
	val |= next_comp;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
		ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);

	return 0;
}

static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				 u32 next_comp, u32 header)
{
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
			     next_comp);

	return 0;
}

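/* Configure what a PTP flow does with matched frames: write the nanoseconds
 * into the frame's reserved field (PTP_WRITE_NS, ingress), rewrite the
 * timestamp directly in the frame (PTP_WRITE_1588, one-step egress) or only
 * save the egress time in the timestamp FIFO (PTP_SAVE_IN_TS_FIFO, two-step
 * egress).
 */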
static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev,
				      enum ts_blk blk, u8 flow,
				      enum ptp_cmd cmd)
{
	u32 val;

	/* Check non-zero reserved field */
	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);

	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
					  PTP_NOP : cmd);
	if (cmd == PTP_SAVE_IN_TS_FIFO)
		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
	else if (cmd == PTP_WRITE_NS)
		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
			     val);

	if (cmd == PTP_WRITE_1588)
		/* Rewrite timestamp directly in frame */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
	else if (cmd == PTP_SAVE_IN_TS_FIFO)
		/* no rewrite */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
	else
		/* Write in reserved field */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);

	return 0;
}

static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool one_step, bool enable)
{
	enum vsc85xx_ptp_msg_type msgs[] = {
		PTP_MSG_TYPE_SYNC,
		PTP_MSG_TYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		if (blk == INGRESS)
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_NS);
		else if (msgs[i] == PTP_MSG_TYPE_SYNC && one_step)
			/* No need to know the Sync transmit time when sending
			 * in one-step mode, the timestamp is written directly
			 * in the frame.
			 */
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_1588);
		else
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_SAVE_IN_TS_FIFO);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_ENA(i));
		val &= ~PTP_FLOW_ENA;
		if (enable)
			val |= PTP_FLOW_ENA;
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     val);
	}

	return 0;
}

static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
			     bool enable)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};

		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
		       get_unaligned_be16(&ptp_multicast[4]);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
				     get_unaligned_be32(ptp_multicast));
	} else {
		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	}

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
	val &= ~ETH1_FLOW_ENA;
	if (enable)
		val |= ETH1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);

	return 0;
}

static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool enable)
{
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
			     ANA_IP1_NXT_PROT_IPV4 |
			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);

	/* Matching UDP protocol number */
	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
			     val);

	/* End of IP protocol, start of next protocol (UDP) */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
			     ANA_IP1_NXT_PROT_OFFSET2(20));

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);

	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
	/* UDP checksum offset in IPv4 packet
	 * according to: https://tools.ietf.org/html/rfc768
	 */
	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     val);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
	if (enable)
		val |= IP1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	return 0;
}

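/* (Re)configure the ingress and egress analyzer engines of this PHY according
 * to the current rx_filter/tx_type: chain the Ethernet comparator to either
 * the PTP or the IP/UDP comparator, enable the relevant flows and finally
 * re-enable the engines in the analyzer mode register.
 */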
static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u8 eng_id = base ? 0 : 1;
	u32 val;

	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
	} else {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		/* Header length of IPv[4/6] + UDP */
		vsc85xx_ip1_next_comp(phydev, INGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
		vsc85xx_ip1_next_comp(phydev, EGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
	}

	vsc85xx_eth1_conf(phydev, INGRESS,
			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ip1_conf(phydev, INGRESS,
			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);

	vsc85xx_eth1_conf(phydev, EGRESS,
			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ip1_conf(phydev, EGRESS,
			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);

	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));

	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	return 0;
}

void vsc85xx_link_change_notify(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_set_latencies(phydev);
	mutex_unlock(&priv->ts_lock);
}

static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val |= PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	val &= ~PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);
}

static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct phy_device *phydev = vsc8531->ptp->phydev;
	struct hwtstamp_config cfg;
	bool one_step = false;
	u32 val;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags)
		return -EINVAL;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		one_step = true;
		break;
	case HWTSTAMP_TX_ON:
		break;
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->tx_type = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		/* ETH->IP->UDP->PTP */
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		/* ETH->PTP */
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->rx_filter = cfg.rx_filter;

	mutex_lock(&vsc8531->ts_lock);

	__skb_queue_purge(&vsc8531->ptp->tx_queue);
	__skb_queue_head_init(&vsc8531->ptp->tx_queue);

	/* Disable predictor while configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* Bypass egress or ingress blocks if timestamping isn't used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
		val |= PTP_IFACE_CTRL_EGR_BYPASS;
	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
		val |= PTP_IFACE_CTRL_INGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	/* Resetting FIFO so that it's empty after reconfiguration */
	vsc85xx_ts_reset_fifo(phydev);

	vsc85xx_ts_engine_init(phydev, one_step);

	/* Re-enable predictors now */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val |= PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val |= PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	vsc8531->ptp->configured = 1;
	mutex_unlock(&vsc8531->ts_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *info)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	if (!vsc8531->ptp->configured)
		return;

	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
		kfree_skb(skb);
		return;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	mutex_lock(&vsc8531->ts_lock);
	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
	mutex_unlock(&vsc8531->ts_lock);
}

static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct vsc85xx_ptphdr *ptphdr;
	struct timespec64 ts;
	unsigned long ns;

	if (!vsc8531->ptp->configured)
		return false;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);

	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
	if (!ptphdr)
		return false;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));

	ns = ntohl(ptphdr->rsrvd2);

	/* nsec is in reserved field */
	if (ts.tv_nsec < ns)
		ts.tv_sec--;

	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
	netif_rx_ni(skb);

	return true;
}

static const struct ptp_clock_info vsc85xx_clk_caps = {
	.owner		= THIS_MODULE,
	.name		= "VSC85xx timer",
	.max_adj	= S32_MAX,
	.n_alarm	= 0,
	.n_pins		= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjtime	= &vsc85xx_adjtime,
	.adjfine	= &vsc85xx_adjfine,
	.gettime64	= &vsc85xx_gettime,
	.settime64	= &vsc85xx_settime,
};

static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
		struct mdio_device *dev;

		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
		phydev = container_of(dev, struct phy_device, mdio);

		return phydev->priv;
	}

	return vsc8531;
}

static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	return vsc8531->input_clk_init;
}

static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	vsc8531->input_clk_init = true;
}

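/* One-time 1588 block bring-up for a PHY: select the LTC clock, configure the
 * delay FIFOs, rewriters and timestamp FIFO, run the n-phase sampler
 * calibration sequence, set the default latencies, disable all flows and
 * initialize the comparators, then register the MII timestamper hooks and
 * the PTP clock.
 */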
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
	u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
	u32 val;

	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
		phy_lock_mdio_bus(phydev);

		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
		 */
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_1588);
		phy_ts_base_write(phydev, 29, 0x7ae0);
		phy_ts_base_write(phydev, 30, 0xb71c);
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_STANDARD);

		phy_unlock_mdio_bus(phydev);

		vsc8584_set_input_clk_configured(phydev);
	}

	/* Disable predictor before configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* By default, the internal clock of fixed rate 250MHz is used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
	val &= ~PTP_LTC_SEQUENCE_A_MASK;
	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
	if (ltc_seq_e[PHC_CLK_250MHZ])
		val |= PTP_LTC_SEQ_ADD_SUB;
	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
			     PPS_WIDTH_ADJ);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);

	/* Enable n-phase sampler for Viper Rev-B */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
		 PTP_ACCUR_LOAD_SAVE_BYPASS);
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	/* Do not access FIFO via SI */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
	val &= ~PTP_TSTAMP_FIFO_SI_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* Put the flag that indicates the frame has been modified in bit 7 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* 30bit mode for RX timestamp, only the nanoseconds are kept in
	 * reserved field.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
			     val);

	vsc85xx_ts_fsb_init(phydev);

	/* Set the Egress timestamp FIFO configuration and status register */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	vsc85xx_ts_reset_fifo(phydev);

	val = PTP_IFACE_CTRL_CLK_ENA;
	if (!IS_ENABLED(CONFIG_MACSEC))
		val |= PTP_IFACE_CTRL_GMII_PROT;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_set_latencies(phydev);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val |= PTP_IFACE_CTRL_EGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_disable_flows(phydev, EGRESS);
	vsc85xx_ts_disable_flows(phydev, INGRESS);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
	/* Strict matching in flow (packets should match flows from the same
	 * index in all enabled comparators (except PTP)).
	 */
	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	/* Initialized for ingress and egress flows:
	 * - The Ethernet comparator.
	 * - The IP comparator.
	 * - The PTP comparator.
	 */
	vsc85xx_eth_cmp1_init(phydev, INGRESS);
	vsc85xx_ip_cmp1_init(phydev, INGRESS);
	vsc85xx_ptp_cmp_init(phydev, INGRESS);
	vsc85xx_eth_cmp1_init(phydev, EGRESS);
	vsc85xx_ip_cmp1_init(phydev, EGRESS);
	vsc85xx_ptp_cmp_init(phydev, EGRESS);

	vsc85xx_ts_eth_cmp1_sig(phydev);

	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
	phydev->mii_ts = &vsc8531->mii_ts;

	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));

	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
						     &phydev->mdio.dev);
	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}

void vsc8584_config_ts_intr(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
			     VSC85XX_1588_INT_MASK_MASK);
	mutex_unlock(&priv->ts_lock);
}

int vsc8584_ptp_init(struct phy_device *phydev)
{
	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_VSC8575:
	case PHY_ID_VSC8582:
	case PHY_ID_VSC8584:
		return __vsc8584_init_ptp(phydev);
	}

	return 0;
}

irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	int rc;

	mutex_lock(&priv->ts_lock);
	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
	/* Ack the PTP interrupt */
	vsc85xx_ts_write_csr(phydev, PROCESSOR,
			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);

	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
		mutex_unlock(&priv->ts_lock);
		return IRQ_NONE;
	}

	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
		vsc85xx_get_tx_ts(priv->ptp);
	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
		__skb_queue_purge(&priv->ptp->tx_queue);
		vsc85xx_ts_reset_fifo(phydev);
	}

	mutex_unlock(&priv->ts_lock);
	return IRQ_HANDLED;
}

int vsc8584_ptp_probe(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
				    GFP_KERNEL);
	if (!vsc8531->ptp)
		return -ENOMEM;

	mutex_init(&vsc8531->phc_lock);
	mutex_init(&vsc8531->ts_lock);

	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
	 * the same GPIO can be requested by all the PHYs of the same package.
	 * This GPIO must be used with the gpio_lock taken (the lock is shared
	 * between all PHYs).
	 */
	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
						     GPIOD_OUT_LOW);
	if (IS_ERR(vsc8531->load_save)) {
		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
			   PTR_ERR(vsc8531->load_save));
		return PTR_ERR(vsc8531->load_save);
	}

	vsc8531->ptp->phydev = phydev;

	return 0;
}

int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;

	/* Initialize shared GPIO lock */
	mutex_init(&shared->gpio_lock);

	return 0;
}