// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
 *
 * Authors: Quentin Schulz & Antoine Tenart
 * License: Dual MIT/GPL
 * Copyright (c) 2020 Microsemi Corporation
 */

#include <linux/gpio/consumer.h>
#include <linux/ip.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/udp.h>
#include <asm/unaligned.h>

#include "mscc.h"
#include "mscc_ptp.h"

/* Two PHYs share the same 1588 processor, which must be configured entirely
 * through the base PHY of the processor.
 */
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
			       val);
}

/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
}

enum ts_blk_hw {
	INGRESS_ENGINE_0,
	EGRESS_ENGINE_0,
	INGRESS_ENGINE_1,
	EGRESS_ENGINE_1,
	INGRESS_ENGINE_2,
	EGRESS_ENGINE_2,
	PROCESSOR_0,
	PROCESSOR_1,
};

enum ts_blk {
	INGRESS,
	EGRESS,
	PROCESSOR,
};

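/* The 1588 block CSRs are not mapped directly in the MDIO register space.
 * They are accessed indirectly through the base PHY: the target engine and
 * CSR address are written to the BIU address control register on the 1588
 * page, the BIU_ADDR_EXE bit triggers the transfer, which is polled before
 * the 32-bit data is moved through the two 16-bit CSR data registers.
 */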
static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
			       u16 addr)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 val, cnt = 0;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
	val <<= 16;
	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	phy_unlock_mdio_bus(phydev);

	return val;
}

static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
				 u16 addr, u32 val)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
		    blk == PROCESSOR;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	if (!cond || upper)
		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);

	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	if (cond && upper)
		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);

	phy_unlock_mdio_bus(phydev);
}

/* Pick bytes from PTP header */
#define PTP_HEADER_TRNSP_MSG		26
#define PTP_HEADER_DOMAIN_NUM		25
#define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
#define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))

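/* Program the frame signature builder (FSB) used by the egress timestamp
 * FIFO. Each FIFO entry carries a 16-byte signature made of the PTP sequence
 * ID, domain number, message type and destination MAC address, padded with
 * the message type selector. The same layout is rebuilt in software by
 * get_sig() when matching FIFO entries against queued skbs. Each FSB
 * register packs five 6-bit byte selectors.
 */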
static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
{
	u8 sig_sel[16] = {};
	signed char i, pos = 0;

	/* Seq ID is 2B long and starts at 30th byte */
	for (i = 1; i >= 0; i--)
		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);

	/* DomainNum */
	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;

	/* MsgType */
	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;

	/* MAC address is 6B long */
	for (i = ETH_ALEN - 1; i >= 0; i--)
		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (; pos < ARRAY_SIZE(sig_sel); pos++)
		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;

	for (i = 0; i <= 2; i++) {
		u32 val = 0;

		for (pos = i * 5 + 4; pos >= i * 5; pos--)
			val = (val << 6) | sig_sel[pos];

		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
				     val);
	}

	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
			     sig_sel[15]);

	return 0;
}

static const u32 vsc85xx_egr_latency[] = {
	/* Copper Egress */
	1272, /* 1000Mbps */
	12516, /* 100Mbps */
	125444, /* 10Mbps */
	/* Fiber Egress */
	1277, /* 1000Mbps */
	12537, /* 100Mbps */
};

static const u32 vsc85xx_egr_latency_macsec[] = {
	/* Copper Egress ON */
	3496, /* 1000Mbps */
	34760, /* 100Mbps */
	347844, /* 10Mbps */
	/* Fiber Egress ON */
	3502, /* 1000Mbps */
	34780, /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency[] = {
	/* Copper Ingress */
	208, /* 1000Mbps */
	304, /* 100Mbps */
	2023, /* 10Mbps */
	/* Fiber Ingress */
	98, /* 1000Mbps */
	197, /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency_macsec[] = {
	/* Copper Ingress */
	2408, /* 1000Mbps */
	22300, /* 100Mbps */
	222009, /* 10Mbps */
	/* Fiber Ingress */
	2299, /* 1000Mbps */
	22192, /* 100Mbps */
};

static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
{
	u32 val, ingr_latency, egr_latency;
	u8 idx;

	/* No need to set the packet latencies if the link is down */
	if (!phydev->link)
		return;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
			     STALL_EGR_LATENCY(phydev->speed));

	switch (phydev->speed) {
	case SPEED_100:
		idx = 1;
		break;
	case SPEED_1000:
		idx = 0;
		break;
	default:
		idx = 2;
		break;
	}

	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
			     PTP_INGR_LOCAL_LATENCY(ingr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
			     PTP_EGR_LOCAL_LATENCY(egr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
}

static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
{
	u8 i;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);

	for (i = 0; i < COMP_MAX_FLOWS; i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);

		if (i >= PTP_COMP_MAX_FLOWS)
			continue;

		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
				     0);
	}

	return 0;
}

static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
	val |= ANA_FSB_ADDR_FROM_ETH1;
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);

	return 0;
}

static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
						struct iphdr *iphdr,
						struct udphdr *udphdr)
{
	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
		return NULL;

	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
}

static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
{
	struct ethhdr *ethhdr = eth_hdr(skb);
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (ethhdr->h_proto == htons(ETH_P_1588))
		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
						 skb_mac_header_len(skb));

	if (ethhdr->h_proto != htons(ETH_P_IP))
		return NULL;

	iphdr = ip_hdr(skb);
	udphdr = udp_hdr(skb);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
						enum hwtstamp_rx_filters rx_filter)
{
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
		return (struct vsc85xx_ptphdr *)skb->data;

	iphdr = (struct iphdr *)skb->data;
	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

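/* Build the 16-byte signature of an outgoing PTP frame, using the same byte
 * layout as programmed into the frame signature builder by
 * vsc85xx_ts_fsb_init(): sequence ID, domain number, message type,
 * destination MAC address, padded with the message type.
 */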
static int get_sig(struct sk_buff *skb, u8 *sig)
{
	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
	struct ethhdr *ethhdr = eth_hdr(skb);
	unsigned int i;

	if (!ptphdr)
		return -EOPNOTSUPP;

	sig[0] = (__force u16)ptphdr->seq_id >> 8;
	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
	sig[2] = ptphdr->domain;
	sig[3] = ptphdr->tsmt & GENMASK(3, 0);

	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (i = 10; i < 16; i++)
		sig[i] = ptphdr->tsmt & GENMASK(3, 0);

	return 0;
}

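/* Pop one entry from the egress timestamp FIFO (timestamp plus 16-byte
 * signature, spread over the seven FIFO registers) and look for the queued
 * skb whose signature matches it. A matching skb gets its TX timestamp
 * completed; non-matching skbs are requeued for later FIFO entries.
 */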
static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct vsc85xx_ts_fifo fifo;
	struct sk_buff *skb;
	u8 skb_sig[16], *p;
	int i, len;
	u32 reg;

	memset(&fifo, 0, sizeof(fifo));
	p = (u8 *)&fifo;

	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
	if (reg & PTP_EGR_TS_FIFO_EMPTY)
		return;

	*p++ = reg & 0xff;
	*p++ = (reg >> 8) & 0xff;

	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
	for (i = 1; i < 7; i++) {
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
		*p++ = reg & 0xff;
		*p++ = (reg >> 8) & 0xff;
		*p++ = (reg >> 16) & 0xff;
		*p++ = (reg >> 24) & 0xff;
	}

	len = skb_queue_len(&ptp->tx_queue);
	if (len < 1)
		return;

	while (len--) {
		skb = __skb_dequeue(&ptp->tx_queue);
		if (!skb)
			return;

		/* The signature of this packet can't be built and never will
		 * be, so drop the packet instead of requeueing it.
		 */
		if (get_sig(skb, skb_sig) < 0) {
			kfree_skb(skb);
			continue;
		}

		/* Check if we found the signature we were looking for. */
		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
			skb_complete_tx_timestamp(skb, &shhwtstamps);

			return;
		}

		/* The signature is valid but does not match the FIFO entry
		 * being processed, so requeue the skb; it may match a later
		 * entry.
		 */
		__skb_queue_tail(&ptp->tx_queue, skb);
	}
}

static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
{
	u32 reg;

	do {
		vsc85xx_dequeue_skb(ptp);

		/* If other timestamps are available in the FIFO, process them. */
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
}

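/* Configure the PTP comparator: one flow per timestamped message type (Sync
 * and Delay_Req), matching on the message type field and on the channel
 * owned by this PHY, with the domain number range check disabled.
 */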
static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     base ? PTP_FLOW_VALID_CH0 :
				     PTP_FLOW_VALID_CH1);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
				     msgs[i] << 24);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
				     PTP_FLOW_MSG_TYPE_MASK);
	}

	return 0;
}

static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     val);

	return 0;
}

static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
			     PTP_EV_PORT);
	/* Match on dest port only, ignore src */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
			     0xffff);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	/* Match all IPs */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);

	return 0;
}

static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u64 adj = 0;
	u32 val;

	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
		return 0;

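	/* scaled_ppm is the requested frequency offset in parts per million,
	 * with a 16-bit fractional part. The hardware applies it by adding or
	 * subtracting 1 ns every "adj" ns, so the rollover period computed
	 * below is adj = 65536 * 1e6 / |scaled_ppm| ns. Offsets below roughly
	 * 1 ppb (the check on 66 above) are ignored.
	 */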
	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
	if (adj > 1000000000L)
		adj = 1000000000L;

	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;

	mutex_lock(&priv->phc_lock);

	/* Write the rollover period (in ns) to the auto adjust register. */
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
			     val);

	/* The auto adjust update val is set to 0 after write operation. */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_SAVE_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);

	ts->tv_sec = ((time64_t)val) << 32;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
	ts->tv_sec += val;

	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
					  MSCC_PHY_PTP_LTC_SAVED_NS);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_gettime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_settime(struct ptp_clock_info *info,
			     const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
			     PTP_LTC_LOAD_NS(ts->tv_nsec));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val &= ~PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_settime(struct ptp_clock_info *info,
			   const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_settime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	/* Can't recover that big of an offset. Let's set the time directly. */
	if (abs(delta) >= NSEC_PER_SEC) {
		struct timespec64 ts;
		u64 now;

		mutex_lock(&priv->phc_lock);

		__vsc85xx_gettime(info, &ts);
		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);
		__vsc85xx_settime(info, &ts);

		mutex_unlock(&priv->phc_lock);

		return 0;
	}

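	/* Smaller offsets are applied directly through the LTC offset
	 * adjustment register.
	 */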
	mutex_lock(&priv->phc_lock);

	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
	if (delta > 0)
		val |= PTP_LTC_OFFSET_ADD;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				  u32 next_comp, u32 etype)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
	val |= next_comp;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
		ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);

	return 0;
}

static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				 u32 next_comp, u32 header)
{
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
			     next_comp);

	return 0;
}

static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev,
				      enum ts_blk blk, u8 flow,
				      enum ptp_cmd cmd)
{
	u32 val;

	/* Check non-zero reserved field */
	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);

	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
					  PTP_NOP : cmd);
	if (cmd == PTP_SAVE_IN_TS_FIFO)
		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
	else if (cmd == PTP_WRITE_NS)
		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
			     val);

	if (cmd == PTP_WRITE_1588)
		/* Rewrite timestamp directly in frame */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
	else if (cmd == PTP_SAVE_IN_TS_FIFO)
		/* no rewrite */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
	else
		/* Write in reserved field */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);

	return 0;
}

static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool one_step, bool enable)
{
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		if (blk == INGRESS)
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_NS);
		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
			/* In one-step mode the timestamp is written directly
			 * into the Sync frame, no need to save it in the TS
			 * FIFO.
			 */
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_1588);
		else
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_SAVE_IN_TS_FIFO);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_ENA(i));
		val &= ~PTP_FLOW_ENA;
		if (enable)
			val |= PTP_FLOW_ENA;
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     val);
	}

	return 0;
}

static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
			     bool enable)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};

		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
		       get_unaligned_be16(&ptp_multicast[4]);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
				     get_unaligned_be32(ptp_multicast));
	} else {
		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	}

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
	val &= ~ETH1_FLOW_ENA;
	if (enable)
		val |= ETH1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);

	return 0;
}

static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool enable)
{
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
			     ANA_IP1_NXT_PROT_IPV4 |
			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);

	/* Matching UDP protocol number */
	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
			     val);

	/* End of IP protocol, start of next protocol (UDP) */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
			     ANA_IP1_NXT_PROT_OFFSET2(20));

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);

	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
	/* UDP checksum offset in IPv4 packet
	 * according to: https://tools.ietf.org/html/rfc768
	 */
	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     val);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
	if (enable)
		val |= IP1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	return 0;
}

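/* (Re)configure the analyzer engine owned by this PHY: engine 0 for the base
 * port of the 1588 processor, engine 1 for the other port. The Ethernet, IP
 * and PTP comparators are set up for both ingress and egress according to
 * the current RX filter and TX type, then the engine is re-enabled in the
 * directions that are actually used.
 */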
static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u8 eng_id = base ? 0 : 1;
	u32 val;

	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
	} else {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		/* Header length of IPv4 (20B) + UDP (8B) */
		vsc85xx_ip1_next_comp(phydev, INGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
		vsc85xx_ip1_next_comp(phydev, EGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
	}

	vsc85xx_eth1_conf(phydev, INGRESS,
			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ip1_conf(phydev, INGRESS,
			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);

	vsc85xx_eth1_conf(phydev, EGRESS,
			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ip1_conf(phydev, EGRESS,
			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);

	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));

	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	return 0;
}

void vsc85xx_link_change_notify(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_set_latencies(phydev);
	mutex_unlock(&priv->ts_lock);
}

static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val |= PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	val &= ~PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);
}

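/* Handler for the hwtstamp configuration request (SIOCSHWTSTAMP): apply the
 * requested TX type and RX filter. The predictors are disabled and unused
 * directions bypassed while the analyzer engines are reconfigured, the
 * egress timestamp FIFO is reset, and the predictors are re-enabled
 * afterwards.
 */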
static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct phy_device *phydev = vsc8531->ptp->phydev;
	struct hwtstamp_config cfg;
	bool one_step = false;
	u32 val;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		one_step = true;
		break;
	case HWTSTAMP_TX_ON:
		break;
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->tx_type = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		/* ETH->IP->UDP->PTP */
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		/* ETH->PTP */
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->rx_filter = cfg.rx_filter;

	mutex_lock(&vsc8531->ts_lock);

	__skb_queue_purge(&vsc8531->ptp->tx_queue);
	__skb_queue_head_init(&vsc8531->ptp->tx_queue);

	/* Disable predictor while configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* Bypass egress or ingress blocks if timestamping isn't used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
		val |= PTP_IFACE_CTRL_EGR_BYPASS;
	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
		val |= PTP_IFACE_CTRL_INGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	/* Resetting FIFO so that it's empty after reconfiguration */
	vsc85xx_ts_reset_fifo(phydev);

	vsc85xx_ts_engine_init(phydev, one_step);

	/* Re-enable predictors now */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val |= PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val |= PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	vsc8531->ptp->configured = 1;
	mutex_unlock(&vsc8531->ts_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *info)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	if (!vsc8531->ptp->configured)
		return;

	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
		kfree_skb(skb);
		return;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	mutex_lock(&vsc8531->ts_lock);
	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
	mutex_unlock(&vsc8531->ts_lock);
}

static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct vsc85xx_ptphdr *ptphdr;
	struct timespec64 ts;
	unsigned long ns;

	if (!vsc8531->ptp->configured)
		return false;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);

	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
	if (!ptphdr)
		return false;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));

	/* Only the nanoseconds of the RX timestamp are kept, in the reserved
	 * field of the PTP header. Rebuild the seconds from the current LTC
	 * time, going back one second if the nanoseconds wrapped around in
	 * between.
	 */
	ns = ntohl(ptphdr->rsrvd2);
	if (ts.tv_nsec < ns)
		ts.tv_sec--;

	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
	netif_rx(skb);

	return true;
}

static const struct ptp_clock_info vsc85xx_clk_caps = {
	.owner		= THIS_MODULE,
	.name		= "VSC85xx timer",
	.max_adj	= S32_MAX,
	.n_alarm	= 0,
	.n_pins		= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjtime	= &vsc85xx_adjtime,
	.adjfine	= &vsc85xx_adjfine,
	.gettime64	= &vsc85xx_gettime,
	.settime64	= &vsc85xx_settime,
};

static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
		struct mdio_device *dev;

		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
		phydev = container_of(dev, struct phy_device, mdio);

		return phydev->priv;
	}

	return vsc8531;
}

static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	return vsc8531->input_clk_init;
}

static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	vsc8531->input_clk_init = true;
}

static int __vsc8584_init_ptp(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
	static const u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
	u32 val;

	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
		phy_lock_mdio_bus(phydev);

		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
		 */
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_1588);
		phy_ts_base_write(phydev, 29, 0x7ae0);
		phy_ts_base_write(phydev, 30, 0xb71c);
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_STANDARD);

		phy_unlock_mdio_bus(phydev);

		vsc8584_set_input_clk_configured(phydev);
	}

	/* Disable predictor before configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* By default, the internal clock of fixed rate 250MHz is used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
	val &= ~PTP_LTC_SEQUENCE_A_MASK;
	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
	if (ltc_seq_e[PHC_CLK_250MHZ])
		val |= PTP_LTC_SEQ_ADD_SUB;
	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
			     PPS_WIDTH_ADJ);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);

	/* Enable n-phase sampler for Viper Rev-B */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
		 PTP_ACCUR_LOAD_SAVE_BYPASS);
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	/* Do not access FIFO via SI */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
	val &= ~PTP_TSTAMP_FIFO_SI_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* Use bit 7 as the flag that indicates the frame has been modified */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* 30bit mode for RX timestamp, only the nanoseconds are kept in
	 * reserved field.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
			     val);

	vsc85xx_ts_fsb_init(phydev);

	/* Set the Egress timestamp FIFO configuration and status register */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	vsc85xx_ts_reset_fifo(phydev);

	val = PTP_IFACE_CTRL_CLK_ENA;
	if (!IS_ENABLED(CONFIG_MACSEC))
		val |= PTP_IFACE_CTRL_GMII_PROT;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_set_latencies(phydev);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val |= PTP_IFACE_CTRL_EGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_disable_flows(phydev, EGRESS);
	vsc85xx_ts_disable_flows(phydev, INGRESS);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable all ingress and egress engines while the analyzer is
	 * reconfigured.
	 */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
	/* Strict flow matching: a packet must match flows with the same index
	 * in all enabled comparators (except the PTP one).
	 */
	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	/* Initialize the following comparators for both ingress and egress
	 * flows:
	 * - the Ethernet comparator,
	 * - the IP comparator,
	 * - the PTP comparator.
	 */
	vsc85xx_eth_cmp1_init(phydev, INGRESS);
	vsc85xx_ip_cmp1_init(phydev, INGRESS);
	vsc85xx_ptp_cmp_init(phydev, INGRESS);
	vsc85xx_eth_cmp1_init(phydev, EGRESS);
	vsc85xx_ip_cmp1_init(phydev, EGRESS);
	vsc85xx_ptp_cmp_init(phydev, EGRESS);

	vsc85xx_ts_eth_cmp1_sig(phydev);

	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
	phydev->mii_ts = &vsc8531->mii_ts;

	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));

	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
						     &phydev->mdio.dev);
	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}

void vsc8584_config_ts_intr(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
			     VSC85XX_1588_INT_MASK_MASK);
	mutex_unlock(&priv->ts_lock);
}

int vsc8584_ptp_init(struct phy_device *phydev)
{
	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_VSC8572:
	case PHY_ID_VSC8574:
	case PHY_ID_VSC8575:
	case PHY_ID_VSC8582:
	case PHY_ID_VSC8584:
		return __vsc8584_init_ptp(phydev);
	}

	return 0;
}

irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	int rc;

	mutex_lock(&priv->ts_lock);
	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
	/* Ack the PTP interrupt */
	vsc85xx_ts_write_csr(phydev, PROCESSOR,
			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);

	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
		mutex_unlock(&priv->ts_lock);
		return IRQ_NONE;
	}

	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
		vsc85xx_get_tx_ts(priv->ptp);
	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
		__skb_queue_purge(&priv->ptp->tx_queue);
		vsc85xx_ts_reset_fifo(phydev);
	}

	mutex_unlock(&priv->ts_lock);
	return IRQ_HANDLED;
}

int vsc8584_ptp_probe(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
				    GFP_KERNEL);
	if (!vsc8531->ptp)
		return -ENOMEM;

	mutex_init(&vsc8531->phc_lock);
	mutex_init(&vsc8531->ts_lock);

	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
	 * the same GPIO can be requested by all the PHYs of the same package.
	 * This GPIO must be used with the gpio_lock taken (the lock is shared
	 * between all PHYs).
	 */
	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
						     GPIOD_OUT_LOW);
	if (IS_ERR(vsc8531->load_save)) {
		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
			   PTR_ERR(vsc8531->load_save));
		return PTR_ERR(vsc8531->load_save);
	}

	vsc8531->ptp->phydev = phydev;

	return 0;
}

int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;

	/* Initialize shared GPIO lock */
	mutex_init(&shared->gpio_lock);

	return 0;
}