// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ptp.c:
 * Definition of functions for Linux PTP support.
 */

#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>

#include "aq_nic.h"
#include "aq_ptp.h"
#include "aq_ring.h"
#include "aq_phy.h"
#include "aq_filters.h"

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)

#define AQ_PTP_TX_TIMEOUT        (HZ *  10)

#define POLL_SYNC_TIMER_MS 15

enum ptp_speed_offsets {
	ptp_offset_idx_10 = 0,
	ptp_offset_idx_100,
	ptp_offset_idx_1000,
	ptp_offset_idx_2500,
	ptp_offset_idx_5000,
	ptp_offset_idx_10000,
};

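/* Lock-protected circular buffer of skbs that are waiting for their
 * egress hardware timestamp to come back from the MAC.
 */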
struct ptp_skb_ring {
	struct sk_buff **buff;
	spinlock_t lock;
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

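/* Watchdog state for outstanding TX timestamps: "active" is set while at
 * least one request is pending, "tx_start" is refreshed on each transmit
 * and is used by aq_ptp_tx_timeout_check() to flush stale skbs.
 */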
struct ptp_tx_timeout {
	spinlock_t lock;
	bool active;
	unsigned long tx_start;
};

struct aq_ptp_s {
	struct aq_nic_s *aq_nic;
	struct hwtstamp_config hwtstamp_config;
	spinlock_t ptp_lock;
	spinlock_t ptp_ring_lock;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_info;

	atomic_t offset_egress;
	atomic_t offset_ingress;

	struct aq_ring_param_s ptp_ring_param;

	struct ptp_tx_timeout ptp_tx_timeout;

	unsigned int idx_vector;
	struct napi_struct napi;

	struct aq_ring_s ptp_tx;
	struct aq_ring_s ptp_rx;
	struct aq_ring_s hwts_rx;

	struct ptp_skb_ring skb_ring;

	struct aq_rx_filter_l3l4 udp_filter;
	struct aq_rx_filter_l2 eth_type_filter;

	struct delayed_work poll_sync;
	u32 poll_timeout_ms;

	bool extts_pin_enabled;
	u64 last_sync1588_ts;

	bool a1_ptp;
};

struct ptp_tm_offset {
	unsigned int mbps;
	int egress;
	int ingress;
};

static struct ptp_tm_offset ptp_offset[6];

void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int i, egress, ingress;

	if (!aq_ptp)
		return;

	egress = 0;
	ingress = 0;

	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		if (mbps == ptp_offset[i].mbps) {
			egress = ptp_offset[i].egress;
			ingress = ptp_offset[i].ingress;
			break;
		}
	}

	atomic_set(&aq_ptp->offset_egress, egress);
	atomic_set(&aq_ptp->offset_ingress, ingress);
}

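/* Enqueue an skb awaiting its egress timestamp. One slot is always kept
 * free so that head == tail unambiguously means "empty"; returns -ENOMEM
 * when the ring is full. Callers must hold ring->lock (see the locked
 * wrapper aq_ptp_skb_put() below).
 */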
static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned int next_head = (ring->head + 1) % ring->size;

	if (next_head == ring->tail)
		return -ENOMEM;

	ring->buff[ring->head] = skb_get(skb);
	ring->head = next_head;

	return 0;
}

static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ring->lock, flags);
	ret = __aq_ptp_skb_put(ring, skb);
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	if (ring->tail == ring->head)
		return NULL;

	skb = ring->buff[ring->tail];
	ring->tail = (ring->tail + 1) % ring->size;

	return skb;
}

static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&ring->lock, flags);
	skb = __aq_ptp_skb_get(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return skb;
}

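/* Number of skbs currently queued and still waiting for a TX timestamp. */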
static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&ring->lock, flags);
	len = (ring->head >= ring->tail) ?
	      ring->head - ring->tail :
	      ring->size - ring->tail + ring->head;
	spin_unlock_irqrestore(&ring->lock, flags);

	return len;
}

static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
{
	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);

	if (!buff)
		return -ENOMEM;

	spin_lock_init(&ring->lock);

	ring->buff = buff;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;

	return 0;
}

static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = aq_ptp_skb_get(ring)) != NULL)
		dev_kfree_skb_any(skb);
}

static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
{
	if (ring->buff) {
		aq_ptp_skb_ring_clean(ring);
		kfree(ring->buff);
		ring->buff = NULL;
	}
}

static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
{
	spin_lock_init(&timeout->lock);
	timeout->active = false;
}

static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;

	spin_lock_irqsave(&timeout->lock, flags);
	timeout->active = true;
	timeout->tx_start = jiffies;
	spin_unlock_irqrestore(&timeout->lock, flags);
}

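/* Disarm the TX timestamp watchdog once no more skbs are waiting. */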
static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
{
	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
		unsigned long flags;

		spin_lock_irqsave(&timeout->lock, flags);
		timeout->active = false;
		spin_unlock_irqrestore(&timeout->lock, flags);
	}
}

static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;
	bool timeout_flag;

	timeout_flag = false;

	spin_lock_irqsave(&timeout->lock, flags);
	if (timeout->active) {
		timeout_flag = time_is_before_jiffies(timeout->tx_start +
						      AQ_PTP_TX_TIMEOUT);
		/* reset active flag if timeout detected */
		if (timeout_flag)
			timeout->active = false;
	}
	spin_unlock_irqrestore(&timeout->lock, flags);

	if (timeout_flag) {
		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
		netdev_err(aq_ptp->aq_nic->ndev,
			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
	}
}

/* aq_ptp_adjfine
 * @ptp: the ptp clock structure
 * @scaled_ppm: parts per million adjustment from base, with a 16-bit
 *              binary fraction
 *
 * Adjust the frequency of the PTP cycle counter by the indicated amount
 * (converted to parts per billion) from the base frequency.
 */
static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;

	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
					     scaled_ppm_to_ppb(scaled_ppm));
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}

/* aq_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the hardware clock by, in ns
 *
 * Shift the hardware clock by the requested delta.
 */
static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

/* aq_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the hardware clock and return its value in ns,
 * after converting it into a struct timespec64.
 */
static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/* aq_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the hardware clock
 *
 * Set the hardware clock to the requested time by applying the
 * difference between the new time and its current value.
 */
static int aq_ptp_settime(struct ptp_clock_info *ptp,
			  const struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns = timespec64_to_ns(ts);
	u64 now;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);

	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
				       struct skb_shared_hwtstamps *hwtstamp,
				       u64 timestamp)
{
	memset(hwtstamp, 0, sizeof(*hwtstamp));
	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
}

static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
			      u64 period)
{
	if (period)
		netdev_dbg(aq_nic->ndev,
			   "Enable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);
	else
		netdev_dbg(aq_nic->ndev,
			   "Disable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);

	/* Notify hardware of the request to begin sending pulses.
	 * If period is zero, pulsing is disabled.
	 */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
					 start, (u32)period);
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}

static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
				       struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct ptp_clock_time *t = &rq->perout.period;
	struct ptp_clock_time *s = &rq->perout.start;
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = rq->perout.index;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

	/* we cannot support periods greater
	 * than 4 seconds due to reg limit
	 */
	if (t->sec > 4 || t->sec < 0)
		return -ERANGE;

	/* convert to unsigned 64b ns,
	 * verify we can put it in a 32b register
	 */
	period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;

	/* verify the value is in range supported by hardware */
	if (period > U32_MAX)
		return -ERANGE;
	/* convert to unsigned 64b ns */
	/* TODO convert to AQ time */
	start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}

static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
				    struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = 0;
	u32 rest = 0;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

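	/* Align the first pulse to the next full-second boundary of the PTP
	 * clock; if the current time is within ~10 ms of that boundary, skip
	 * one extra second ahead (presumably so the hardware has time to
	 * latch the start value before it passes).
	 */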
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
	div_u64_rem(start, NSEC_PER_SEC, &rest);
	period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
	start = on ? start - rest + NSEC_PER_SEC *
		(rest > 990000000LL ? 2 : 1) : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}

static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u32 enable = aq_ptp->extts_pin_enabled;

	if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
		aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
							enable);
}

static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);

	u32 pin_index = rq->extts.index;

	if (pin_index >= ptp->n_ext_ts)
		return -EINVAL;

	aq_ptp->extts_pin_enabled = !!on;
	if (on) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		cancel_delayed_work_sync(&aq_ptp->poll_sync);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}

	aq_ptp_extts_pin_ctrl(aq_ptp);
	return 0;
}

/* aq_ptp_gpio_feature_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 */
static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return aq_ptp_extts_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return aq_ptp_perout_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return aq_ptp_pps_pin_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* aq_ptp_verify
 * @ptp: the ptp clock structure
 * @pin: index of the pin in question
 * @func: the desired function to use
 * @chan: the function channel index to use
 */
static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			 enum ptp_pin_function func, unsigned int chan)
{
	/* verify the requested pin is there */
	if (!ptp->pin_config || pin >= ptp->n_pins)
		return -EINVAL;

	/* enforce locked channels, no changing them */
	if (chan != ptp->pin_config[pin].chan)
		return -EINVAL;

	/* we want to keep the functions locked as well */
	if (func != ptp->pin_config[pin].func)
		return -EINVAL;

	return 0;
}

/* aq_ptp_tx_hwtstamp - utility function which delivers a TX time stamp
 * @aq_nic: the private nic struct
 * @timestamp: the hardware timestamp, in ns
 *
 * Applies the egress offset to the timestamp, converts it into an
 * skb_shared_hwtstamps structure and passes it up the network stack
 * together with the matching skb.
 */
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
	struct skb_shared_hwtstamps hwtstamp;

	if (!skb) {
		netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
		return;
	}

	timestamp += atomic_read(&aq_ptp->offset_egress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
	skb_tstamp_tx(skb, &hwtstamp);
	dev_kfree_skb_any(skb);

	aq_ptp_tx_timeout_update(aq_ptp);
}

/* aq_ptp_rx_hwtstamp - utility function which attaches an RX time stamp
 * @aq_ptp: pointer to the PTP state struct
 * @skb: particular skb to attach the timestamp to
 * @timestamp: the hardware timestamp, in ns
 *
 * Applies the ingress offset to the timestamp and stores the result in
 * the skb's hwtstamps structure, which is passed up the network stack.
 */
static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
			       u64 timestamp)
{
	timestamp -= atomic_read(&aq_ptp->offset_ingress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
}

void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
				struct hwtstamp_config *config)
{
	*config = aq_ptp->hwtstamp_config;
}

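/* Build the RX steering rules that direct PTP event traffic (UDP dst port
 * 319 and L2 ethertype 0x88F7) to the dedicated PTP RX ring.
 */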
static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
{
	aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
			       HW_ATL_RX_ENABLE_CMP_PROT_L4 |
			       HW_ATL_RX_UDP |
			       HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
			       HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
			       HW_ATL_RX_ENABLE_QUEUE_L3L4 |
			       aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
	aq_ptp->udp_filter.p_dst = PTP_EV_PORT;

	aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
	aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
}

int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
			       struct hwtstamp_config *config)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	const struct aq_hw_ops *hw_ops;
	int err = 0;

	hw_ops = aq_nic->aq_hw_ops;
	if (config->tx_type == HWTSTAMP_TX_ON ||
	    config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
		aq_ptp_prepare_filters(aq_ptp);
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_set) {
			err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
						       &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	} else {
		aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_clear) {
			err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
							&aq_ptp->eth_type_filter);
		}
		aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	}

	if (err)
		return -EREMOTEIO;

	aq_ptp->hwtstamp_config = *config;

	return 0;
}

bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return false;

	return &aq_ptp->ptp_tx == ring ||
	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}

u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
		      unsigned int len)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	u64 timestamp = 0;
	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
						   p, len, &timestamp);

	if (ret > 0)
		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);

	return ret;
}

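/* NAPI handler for the PTP vector: reaps TX completions, drains the
 * hardware-timestamp RX ring, then processes the PTP RX ring and refills
 * its descriptors. If any ring made progress, the full budget is reported
 * so NAPI polls again.
 */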
static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	bool was_cleaned = false;
	int work_done = 0;
	int err;

	/* Processing PTP TX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
							&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
		aq_ring_tx_clean(&aq_ptp->ptp_tx);

		was_cleaned = true;
	}

	/* Processing HW_TIMESTAMP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
							 &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);

		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
							      &aq_ptp->hwts_rx);
		if (err < 0)
			goto err_exit;

		was_cleaned = true;
	}

	/* Processing PTP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
						    &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
		unsigned int sw_tail_old;

		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
		if (err < 0)
			goto err_exit;

		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
		if (err < 0)
			goto err_exit;

		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
							 &aq_ptp->ptp_rx,
							 sw_tail_old);
		if (err < 0)
			goto err_exit;
	}

	if (was_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
					BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
	}

err_exit:
	return work_done;
}

static irqreturn_t aq_ptp_isr(int irq, void *private)
{
	struct aq_ptp_s *aq_ptp = private;
	int err = 0;

	if (!aq_ptp) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&aq_ptp->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
	unsigned long irq_flags;
	int err = NETDEV_TX_OK;
	unsigned int frags;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;
	/* Frags cannot be bigger than 16KB
	 * because PTP usually works
	 * without jumbo frames, even in the background
	 */
	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
		/* Drop the packet because it doesn't make sense to delay it */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
	if (err) {
		netdev_err(aq_nic->ndev, "SKB ring overflow (%u)!\n",
			   ring->size);
		return NETDEV_TX_BUSY;
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	aq_ptp_tx_timeout_start(aq_ptp);
	skb_tx_timestamp(skb);

	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
	frags = aq_nic_map_skb(aq_nic, skb, ring);

	if (likely(frags)) {
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
						       ring, frags);
		if (err >= 0) {
			u64_stats_update_begin(&ring->stats.tx.syncp);
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
			u64_stats_update_end(&ring->stats.tx.syncp);
		}
	} else {
		err = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);

err_exit:
	return err;
}

void aq_ptp_service_task(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ptp_tx_timeout_check(aq_ptp);
}

int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
{
	struct pci_dev *pdev = aq_nic->pdev;
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	if (pdev->msix_enabled || pdev->msi_enabled) {
		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
	} else {
		err = -EINVAL;
		goto err_exit;
	}

err_exit:
	return err;
}

void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct pci_dev *pdev = aq_nic->pdev;

	if (!aq_ptp)
		return;

	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
}

int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_tx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_init(&aq_ptp->ptp_rx, ATL_RING_RX);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 0U);
	if (err < 0)
		goto err_rx_free;

	err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->hwts_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
						      &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	return err;

err_rx_free:
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
err_exit:
	return err;
}

int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
						  &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	napi_enable(&aq_ptp->napi);

err_exit:
	return err;
}

void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);

	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);

	napi_disable(&aq_ptp->napi);
}

void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
		return;

	aq_ring_tx_clean(&aq_ptp->ptp_tx);
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
}

int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	unsigned int tx_ring_idx, rx_ring_idx;
	struct aq_ring_s *hwts;
	struct aq_ring_s *ring;
	int err;

	if (!aq_ptp)
		return 0;

	tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);

	ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
				tx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);

	ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
				rx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit_ptp_tx;
	}

	hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
				     aq_nic->aq_nic_cfg.rxds,
				     aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
	if (!hwts) {
		err = -ENOMEM;
		goto err_exit_ptp_rx;
	}

	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
	if (err != 0) {
		err = -ENOMEM;
		goto err_exit_hwts_rx;
	}

	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
			&aq_ptp->ptp_ring_param.affinity_mask);

	return 0;

err_exit_hwts_rx:
	aq_ring_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
	aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
	aq_ring_free(&aq_ptp->ptp_tx);
err_exit:
	return err;
}

void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ring_free(&aq_ptp->ptp_tx);
	aq_ring_free(&aq_ptp->ptp_rx);
	aq_ring_free(&aq_ptp->hwts_rx);

	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}

#define MAX_PTP_GPIO_COUNT 4

static struct ptp_clock_info aq_ptp_clock = {
	.owner		= THIS_MODULE,
	.name		= "atlantic ptp",
	.max_adj	= 999999999,
	.n_ext_ts	= 0,
	.pps		= 0,
	.adjfine	= aq_ptp_adjfine,
	.adjtime	= aq_ptp_adjtime,
	.gettime64	= aq_ptp_gettime,
	.settime64	= aq_ptp_settime,
	.n_per_out	= 0,
	.enable		= aq_ptp_gpio_feature_enable,
	.n_pins		= 0,
	.verify		= aq_ptp_verify,
	.pin_config	= NULL,
};

#define ptp_offset_init(__idx, __mbps, __egress, __ingress)   do { \
		ptp_offset[__idx].mbps = (__mbps); \
		ptp_offset[__idx].egress = (__egress); \
		ptp_offset[__idx].ingress = (__ingress); } \
		while (0)

static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
	int i;

	/* Load offsets for PTP */
	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		switch (i) {
		/* 100M */
		case ptp_offset_idx_100:
			ptp_offset_init(i, 100,
					offsets->egress_100,
					offsets->ingress_100);
			break;
		/* 1G */
		case ptp_offset_idx_1000:
			ptp_offset_init(i, 1000,
					offsets->egress_1000,
					offsets->ingress_1000);
			break;
		/* 2.5G */
		case ptp_offset_idx_2500:
			ptp_offset_init(i, 2500,
					offsets->egress_2500,
					offsets->ingress_2500);
			break;
		/* 5G */
		case ptp_offset_idx_5000:
			ptp_offset_init(i, 5000,
					offsets->egress_5000,
					offsets->ingress_5000);
			break;
		/* 10G */
		case ptp_offset_idx_10000:
			ptp_offset_init(i, 10000,
					offsets->egress_10000,
					offsets->ingress_10000);
			break;
		}
	}
}

static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
{
	memset(ptp_offset, 0, sizeof(ptp_offset));

	aq_ptp_offset_init_from_fw(offsets);
}

static void aq_ptp_gpio_init(struct ptp_clock_info *info,
			     struct hw_atl_info *hw_info)
{
	struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
	u32 extts_pin_cnt = 0;
	u32 out_pin_cnt = 0;
	u32 i;

	memset(pin_desc, 0, sizeof(pin_desc));

	for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
		if (hw_info->gpio_pin[i] ==
		    (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
			snprintf(pin_desc[out_pin_cnt].name,
				 sizeof(pin_desc[out_pin_cnt].name),
				 "AQ_GPIO%d", i);
			pin_desc[out_pin_cnt].index = out_pin_cnt;
			pin_desc[out_pin_cnt].chan = out_pin_cnt;
			pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
		}
	}

	info->n_per_out = out_pin_cnt;

	if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
		extts_pin_cnt += 1;

		snprintf(pin_desc[out_pin_cnt].name,
			 sizeof(pin_desc[out_pin_cnt].name),
			 "AQ_GPIO%d", out_pin_cnt);
		pin_desc[out_pin_cnt].index = out_pin_cnt;
		pin_desc[out_pin_cnt].chan = 0;
		pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
	}

	info->n_pins = out_pin_cnt + extts_pin_cnt;
	info->n_ext_ts = extts_pin_cnt;

	if (!info->n_pins)
		return;

	info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
				   GFP_KERNEL);

	if (!info->pin_config)
		return;

	memcpy(info->pin_config, &pin_desc,
	       sizeof(struct ptp_pin_desc) * info->n_pins);
}

void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	aq_ptp_settime(&aq_ptp->ptp_info, &ts);
}

static void aq_ptp_poll_sync_work_cb(struct work_struct *w);

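/* Probe-time PTP setup. Only A1 (Atlantic) parts whose firmware reports
 * CAPS_EX_PHY_PTP_EN get a PTP clock registered; in every other case
 * aq_nic->aq_ptp is left NULL and the PTP entry points become no-ops.
 */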
int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
	bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC);
	struct hw_atl_utils_mbox mbox;
	struct ptp_clock *clock;
	struct aq_ptp_s *aq_ptp;
	int err = 0;

	if (!a1_ptp) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	if (!aq_nic->aq_fw_ops->enable_ptp) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);

	if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	aq_ptp_offset_init(&mbox.info.ptp_offset);

	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
	if (!aq_ptp) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_ptp->aq_nic = aq_nic;
	aq_ptp->a1_ptp = a1_ptp;

	spin_lock_init(&aq_ptp->ptp_lock);
	spin_lock_init(&aq_ptp->ptp_ring_lock);

	aq_ptp->ptp_info = aq_ptp_clock;
	aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
	clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
	if (IS_ERR(clock)) {
		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
		err = PTR_ERR(clock);
		goto err_exit;
	}
	aq_ptp->ptp_clock = clock;
	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);

	atomic_set(&aq_ptp->offset_egress, 0);
	atomic_set(&aq_ptp->offset_ingress, 0);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
		       aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);

	aq_ptp->idx_vector = idx_vec;

	aq_nic->aq_ptp = aq_ptp;

	/* enable ptp counter */
	aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
	aq_ptp_clock_init(aq_nic);
	mutex_unlock(&aq_nic->fwreq_mutex);

	INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
	aq_ptp->eth_type_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
	aq_ptp->udp_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);

	return 0;

err_exit:
	if (aq_ptp)
		kfree(aq_ptp->ptp_info.pin_config);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
	return err;
}

void aq_ptp_unregister(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	ptp_clock_unregister(aq_ptp->ptp_clock);
}

void aq_ptp_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
			      aq_ptp->eth_type_filter.location);
	aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
			      aq_ptp->udp_filter.location);
	cancel_delayed_work_sync(&aq_ptp->poll_sync);
	/* disable ptp */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
	mutex_unlock(&aq_nic->fwreq_mutex);

	kfree(aq_ptp->ptp_info.pin_config);

	netif_napi_del(&aq_ptp->napi);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
}

struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
	return aq_ptp->ptp_clock;
}

/* PTP external GPIO nanoseconds count */
static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
{
	u64 ts = 0;

	if (aq_nic->aq_hw_ops->hw_get_sync_ts)
		aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);

	return ts;
}

static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
{
	if (aq_ptp->extts_pin_enabled) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		aq_ptp->last_sync1588_ts =
				aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}
}

int aq_ptp_link_change(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return 0;

	if (aq_nic->aq_hw->aq_link_status.mbps)
		aq_ptp_start_work(aq_ptp);
	else
		cancel_delayed_work_sync(&aq_ptp->poll_sync);

	return 0;
}

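/* Read the SYNC1588 GPIO timestamp and report whether it changed since the
 * last poll. The register is re-read (up to twice) to guard against racing
 * with an in-flight hardware update.
 */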
static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts2;
	u64 sync_ts;

	sync_ts = aq_ptp_get_sync1588_ts(aq_nic);

	if (sync_ts != aq_ptp->last_sync1588_ts) {
		sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
		if (sync_ts != sync_ts2) {
			sync_ts = sync_ts2;
			sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
			if (sync_ts != sync_ts2) {
				netdev_err(aq_nic->ndev,
					   "%s: Unable to get correct GPIO TS\n",
					   __func__);
				sync_ts = 0;
			}
		}

		*new_ts = sync_ts;
		return true;
	}
	return false;
}

static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts;

	/* Sync1588 pin was triggered */
	if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
		if (aq_ptp->extts_pin_enabled) {
			struct ptp_clock_event ptp_event;
			u64 time = 0;

			aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
							      sync_ts, &time);
			ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
			ptp_event.timestamp = time;

			ptp_event.type = PTP_CLOCK_EXTTS;
			ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
		}

		aq_ptp->last_sync1588_ts = sync_ts;
	}

	return 0;
}

static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);

	aq_ptp_check_sync1588(aq_ptp);

	if (aq_ptp->extts_pin_enabled) {
		unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);

		schedule_delayed_work(&aq_ptp->poll_sync, timeout);
	}
}

int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type)
{
	if (!aq_nic->aq_ptp)
		return 0;

	/* Additional RX ring is allocated for PTP HWTS on A1 */
	return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1;
}

u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	unsigned int count = 0U;

	if (!aq_ptp)
		return data;

	count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data);
	data += count;
	count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data);
	data += count;

	if (aq_ptp->a1_ptp) {
		/* Only Receive ring for HWTS */
		count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data);
		data += count;
	}

	return data;
}

#endif