// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE                0x00002000
#define FEC_T_CTRL_CAPTURE              0x00000800
#define FEC_T_CTRL_RESTART              0x00000200
#define FEC_T_CTRL_PERIOD_RST           0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE               0x00000001

#define FEC_T_INC_MASK                  0x0000007f
#define FEC_T_INC_OFFSET                0
#define FEC_T_INC_CORR_MASK             0x00007f00
#define FEC_T_INC_CORR_OFFSET           8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL		0x400
#define FEC_ATIME		0x404
#define FEC_ATIME_EVT_OFFSET	0x408
#define FEC_ATIME_EVT_PERIOD	0x40c
#define FEC_ATIME_CORR		0x410
#define FEC_ATIME_INC		0x414
#define FEC_TS_TIMESTAMP	0x418

#define FEC_TGSR		0x604
#define FEC_TCSR(n)		(0x608 + n * 0x08)
#define FEC_TCCR(n)		(0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL	3
#define FEC_TMODE_TOGGLE	0x05
#define FEC_HIGH_PULSE		0x0F

#define FEC_CC_MULT	(1 << 31)
#define FEC_COUNTER_PERIOD	(1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD	NSEC_PER_SEC
#define FEC_CHANNLE_0		0
#define DEFAULT_PPS_CHANNEL	FEC_CHANNLE_0

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;
	val = 0;

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* Clear the capture or output compare interrupt status,
		 * if any.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register, so add that
		 * check here.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to update the timecounter */
		timecounter_read(&fep->tc);
		/* We want the first compare event to fire at the next full
		 * second. For that we need the current PTP time and the
		 * number of nanoseconds remaining until the next second,
		 * which is NSEC_PER_SEC - ts.tv_nsec. Adding that remainder
		 * to the current timer value gives the next second boundary.
		 */
		tempval = fep->cc.read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* tempval comes from a 31-bit counter, so it is less than
		 * ~2.15 seconds and val stays below ~3.15 seconds. No
		 * overflow in the 32-bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* We also have to handle the case where the current time is
		 * very close to the second boundary, i.e. NSEC_PER_SEC -
		 * ts.tv_nsec is almost zero (for example 20 ns). The timer
		 * keeps running while we compute the first compare event, so
		 * the remaining nanoseconds could run out before the compare
		 * value is written into the TCCR register. To avoid this,
		 * schedule the compare event for the second after next. The
		 * timer is 31 bits wide and wraps after a little more than
		 * 2 seconds, so the second after next still fits.
		 */
		val += NSEC_PER_SEC;

		/* Adding (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter may wrap the 32-bit value. Since that offset is
		 * less than 2 seconds, the wrap cannot cause a problem; only
		 * an offset bigger than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * This function reads the cycle counter register; it is used by the
 * cyclecounter structure to construct a ns counter from the arbitrary
 * fixed point hardware registers.
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycle registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
		fep->hwp + FEC_ATIME_CTRL);

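	/* The counter ticks in nanoseconds (1 ns increment above), so with
	 * mult = 2^31 and shift = 31 the cycle-to-ns conversion is an
	 * identity, and the 31-bit mask matches the wrap period programmed
	 * into FEC_ATIME_EVT_PERIOD.
	 */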
	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
	 * try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
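	/* For example, with ptp_inc = 8 (a 125 MHz clock) and ppb = 100:
	 * rhs = 800, so the first iteration already satisfies lhs >= rhs,
	 * giving corr_inc = 1 and corr_period = 1e9 / 800 = 1250000. One
	 * extra (or missing) nanosecond every 1250000 ticks of 8 ns is
	 * 1 ns per 10 ms, i.e. 100 ppb.
	 */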
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to the highest value: double-speed
	 * correction on every clock tick.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec64.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *adapter =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	mutex_lock(&adapter->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!adapter->ptp_clk_on) {
		mutex_unlock(&adapter->ptp_clk_mutex);
		return -EINVAL;
	}
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
	mutex_unlock(&adapter->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	}
	return -EOPNOTSUPP;
}

/**
 * fec_ptp_disable_hwts - disable hardware time stamping
 * @ndev: pointer to net_device
 */
void fec_ptp_disable_hwts(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fep->hwts_tx_en = 0;
	fep->hwts_rx_en = 0;
}

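/**
 * fec_ptp_set - apply a hwtstamp_config from user space
 * @ndev: network device
 * @ifr: ioctl request holding the struct hwtstamp_config
 *
 * Enable or disable hardware TX timestamping and RX timestamp filtering
 * according to the requested configuration, and copy the (possibly
 * adjusted) configuration back to user space.
 */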
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

	default:
		fep->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	    -EFAULT : 0;
}

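/**
 * fec_ptp_get - report the current hwtstamp_config to user space
 * @ndev: network device
 * @ifr: ioctl request to fill with the current struct hwtstamp_config
 *
 * Return the currently active TX timestamping mode and RX filter.
 */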
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (fep->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/*
 * fec_time_keep - call timecounter_read every second to avoid timer overrun,
 *                 because the ENET hardware only provides a 32-bit counter,
 *                 which would overflow after about 4 seconds
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the PPS event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not just the next one;
		 * see the TCCR description in the spec) to the register.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
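		/* The transfer flag is write-1-to-clear: write the flag back
		 * and wait until the hardware reports it cleared before
		 * updating next_counter.
		 */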
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter. */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 0;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal,
	 * only the PTP_CLOCK_PPS clock events should stop
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

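/**
 * fec_ptp_stop - tear down PTP support
 * @pdev: the FEC platform device
 *
 * Cancel the periodic time-keeping work and unregister the PTP clock
 * registered by fec_ptp_init().
 */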
void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}