/*
 * Driver for the National Semiconductor DP83640 PHYTER
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "dp83640_reg.h"
#define DP83640_PHY_ID	0x20005ce1
#define PSF_EVNT	0x4000
#define DP83640_N_PINS	12

#define MII_DP83640_MICR	0x11
#define MII_DP83640_MISR	0x12

#define MII_DP83640_MICR_OE	0x1
#define MII_DP83640_MICR_IE	0x2

#define MII_DP83640_MISR_RHF_INT_EN	0x01
#define MII_DP83640_MISR_FHF_INT_EN	0x02
#define MII_DP83640_MISR_ANC_INT_EN	0x04
#define MII_DP83640_MISR_DUP_INT_EN	0x08
#define MII_DP83640_MISR_SPD_INT_EN	0x10
#define MII_DP83640_MISR_LINK_INT_EN	0x20
#define MII_DP83640_MISR_ED_INT_EN	0x40
#define MII_DP83640_MISR_LQ_INT_EN	0x80

/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX	16

#define SKB_TIMESTAMP_TIMEOUT	2 /* jiffies */
#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG	0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG	PSF_ENDIAN
#endif
struct dp83640_skb_info {
	int ptp_type;
	unsigned long tmo;
};

struct phy_rxts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
	u16 seqid;   /* sequenceId[15:0] */
	u16 msgtype; /* messageType[3:0], hash[11:0] */
};

struct phy_txts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
};

struct rxts {
	struct list_head list;
	unsigned long tmo;
	u64 ns;
	u16 seqid;
	u8  msgtype;
	u16 hash;
};

struct dp83640_clock;
struct dp83640_private {
	struct list_head list;
	struct dp83640_clock *clock;
	struct phy_device *phydev;
	struct delayed_work ts_work;
	/* remember state of cfg0 during calibration */
	u16 cfg0;
	/* remember the last event time stamp */
	struct phy_txts edata;
	/* list of rx timestamps */
	struct list_head rxts;
	struct list_head rxpool;
	struct rxts rx_pool_data[MAX_RXTS];
	/* protects above three fields from concurrent access */
	spinlock_t rx_lock;
	/* queues of incoming and outgoing packets */
	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;
};
struct dp83640_clock {
	/* keeps the instance in the 'phyter_clocks' list */
	struct list_head list;
	/* we create one clock instance per MII bus */
	struct mii_bus *bus;
	/* protects extended registers from concurrent access */
	struct mutex extreg_lock;
	/* remembers which page was last selected */
	int page;
	/* our advertised capabilities */
	struct ptp_clock_info caps;
	/* protects the three fields below from concurrent access */
	struct mutex clock_lock;
	/* the one phyter from which we shall read */
	struct dp83640_private *chosen;
	/* list of the other attached phyters, not chosen */
	struct list_head phylist;
	/* reference to our PTP hardware clock */
	struct ptp_clock *ptp_clock;
};
static int chosen_phy = -1;
static ushort gpio_tab[GPIO_TABLE_SIZE] = {
	1, 2, 3, 4, 8, 9, 10, 11
};

module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);

MODULE_PARM_DESC(chosen_phy, \
	"The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(gpio_tab, \
	"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
{
	int i, index;

	for (i = 0; i < DP83640_N_PINS; i++) {
		snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
	}

	for (i = 0; i < GPIO_TABLE_SIZE; i++) {
		if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
			pr_err("gpio_tab[%d]=%hu out of range", i, gpio_tab[i]);
			return;
		}
	}

	index = gpio_tab[CALIBRATE_GPIO] - 1;
	pd[index].func = PTP_PF_PHYSYNC;

	index = gpio_tab[PEROUT_GPIO] - 1;
	pd[index].func = PTP_PF_PEROUT;

	for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
		index = gpio_tab[i] - 1;
		pd[index].func = PTP_PF_EXTTS;
		pd[index].chan = i - EXTTS0_GPIO;
	}
}
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);

static void rx_timestamp_work(struct work_struct *work);

/* extended register access functions */

#define BROADCAST_ADDR 31

static inline int broadcast_write(struct phy_device *phydev, u32 regnum,
				  u16 val)
{
	return mdiobus_write(phydev->mdio.bus, BROADCAST_ADDR, regnum, val);
}
/* Caller must hold extreg_lock. */
static int ext_read(struct phy_device *phydev, int page, u32 regnum)
{
	struct dp83640_private *dp83640 = phydev->priv;
	int val;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev, PAGESEL, page);
		dp83640->clock->page = page;
	}
	val = phy_read(phydev, regnum);

	return val;
}
/* Caller must hold extreg_lock. */
static void ext_write(int broadcast, struct phy_device *phydev,
		      int page, u32 regnum, u16 val)
{
	struct dp83640_private *dp83640 = phydev->priv;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev, PAGESEL, page);
		dp83640->clock->page = page;
	}
	if (broadcast)
		broadcast_write(phydev, regnum, val);
	else
		phy_write(phydev, regnum, val);
}
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
		     const struct timespec64 *ts, u16 cmd)
{
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff); /* ns[15:0]  */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);    /* ns[31:16] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff);  /* sec[15:0] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);     /* sec[31:16] */

	ext_write(bc, dev, PAGE4, PTP_CTL, cmd);

	return 0;
}
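/*
 * Note on the sequence above: the four PTP_TDR writes stage a time value
 * (nanoseconds low/high, then seconds low/high), and the final write to
 * PTP_CTL tells the PHY what to do with it: callers pass PTP_LOAD_CLK to
 * set the clock or PTP_STEP_CLK to apply the value as an offset.
 */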
/* convert phy timestamps into driver timestamps */

static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
{
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	rxts->ns = p->ns_lo;
	rxts->ns |= (p->ns_hi & 0x3fff) << 16;
	rxts->ns += ((u64)sec) * 1000000000ULL;
	rxts->seqid = p->seqid;
	rxts->msgtype = (p->msgtype >> 12) & 0xf;
	rxts->hash = p->msgtype & 0x0fff;
	rxts->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
}

static u64 phy2txts(struct phy_txts *p)
{
	u64 ns;
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	ns = p->ns_lo;
	ns |= (p->ns_hi & 0x3fff) << 16;
	ns += ((u64)sec) * 1000000000ULL;

	return ns;
}
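/*
 * In both status formats the PHY reports nanoseconds as a 30 bit quantity
 * (ns_lo plus the low 14 bits of ns_hi; the top two bits of ns_hi carry an
 * overflow count) and whole seconds as a separate 32 bit value, so the
 * helpers above scale the seconds by 10^9 and add the nanoseconds.
 */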
static int periodic_output(struct dp83640_clock *clock,
			   struct ptp_clock_request *clkreq, bool on,
			   int trigger)
{
	struct dp83640_private *dp83640 = clock->chosen;
	struct phy_device *phydev = dp83640->phydev;
	u32 sec, nsec, pwidth;
	u16 gpio, ptp_trig, val;

	if (on) {
		gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
					trigger);
		if (gpio < 1)
			return -EINVAL;
	} else {
		gpio = 0;
	}

	ptp_trig = TRIG_WR |
		   (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
		   (gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
		   TRIG_PER |
		   TRIG_PULSE;

	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;

	if (!on) {
		val |= TRIG_DIS;
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
		ext_write(0, phydev, PAGE4, PTP_CTL, val);
		mutex_unlock(&clock->extreg_lock);
		return 0;
	}

	sec = clkreq->perout.start.sec;
	nsec = clkreq->perout.start.nsec;
	pwidth = clkreq->perout.period.sec * 1000000000UL;
	pwidth += clkreq->perout.period.nsec;
	pwidth /= 2;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val |= TRIG_LOAD;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0]  */
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0]  */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
	/* Triggers 0 and 1 have a programmable pulsewidth2 */
	if (trigger < 2) {
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff);
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);
	}

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);

	mutex_unlock(&clock->extreg_lock);

	return 0;
}
/* ptp clock methods */

static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	u64 rate;
	int neg_adj = 0;
	u16 hi, lo;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	rate = ppb;
	rate <<= 26;
	rate = div_u64(rate, 1953125);

	hi = (rate >> 16) & PTP_RATE_HI_MASK;
	if (neg_adj)
		hi |= PTP_RATE_DIR;
	lo = rate & 0xffff;

	mutex_lock(&clock->extreg_lock);

	ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
	ext_write(1, phydev, PAGE4, PTP_RATEL, lo);

	mutex_unlock(&clock->extreg_lock);

	return 0;
}
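/*
 * Scaling note: 1953125 = 10^9 / 2^9, so the rate word written to
 * PTP_RATEH/PTP_RATEL above works out to ppb * 2^35 / 10^9.  The max_adj
 * of 1953124 ppb advertised in dp83640_clock_init() is the largest ppb
 * whose scaled value still stays below 2^26.
 */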
static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	struct timespec64 ts;
	int err;

	delta += ADJTIME_FIX;

	ts = ns_to_timespec64(delta);

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int val[4];

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);

	val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0]  */
	val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
	val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
	val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */

	mutex_unlock(&clock->extreg_lock);

	ts->tv_nsec = val[0] | (val[1] << 16);
	ts->tv_sec  = val[2] | (val[3] << 16);

	return 0;
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	int err;

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int index;
	u16 evnt, event_num, gpio_num;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		index = rq->extts.index;
		if (index >= N_EXT_TS)
			return -EINVAL;
		event_num = EXT_EVENT + index;
		evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
		if (on) {
			gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
						    PTP_PF_EXTTS, index);
			if (gpio_num < 1)
				return -EINVAL;
			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
			if (rq->extts.flags & PTP_FALLING_EDGE)
				evnt |= EVNT_FALL;
			else
				evnt |= EVNT_RISE;
		}
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
		mutex_unlock(&clock->extreg_lock);
		return 0;

	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index >= N_PER_OUT)
			return -EINVAL;
		return periodic_output(clock, rq, on, rq->perout.index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
			      enum ptp_pin_function func, unsigned int chan)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);

	if (clock->caps.pin_config[pin].func == PTP_PF_PHYSYNC &&
	    !list_empty(&clock->phylist))
		return 1;

	if (func == PTP_PF_PHYSYNC)
		return 1;

	return 0;
}
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };

static void enable_status_frames(struct phy_device *phydev, bool on)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;
	u16 cfg0 = 0, ver;

	if (on)
		cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;

	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);

	mutex_unlock(&clock->extreg_lock);

	if (!phydev->attached_dev) {
		pr_warn("expected to find an attached netdevice\n");
		return;
	}

	if (on) {
		if (dev_mc_add(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to add mc address\n");
	} else {
		if (dev_mc_del(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to delete mc address\n");
	}
}
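/*
 * The PHY delivers its timestamp data in-band as "status frames" addressed
 * to the fixed destination MAC above; dev_mc_add() makes sure the attached
 * netdevice does not filter those frames out before dp83640_rxtstamp() can
 * consume them.
 */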
static bool is_status_frame(struct sk_buff *skb, int type)
{
	struct ethhdr *h = eth_hdr(skb);

	if (PTP_CLASS_V2_L2 == type &&
	    !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
		return true;
	else
		return false;
}

static int expired(struct rxts *rxts)
{
	return time_after(jiffies, rxts->tmo);
}
/* Caller must hold rx_lock. */
static void prune_rx_ts(struct dp83640_private *dp83640)
{
	struct list_head *this, *next;
	struct rxts *rxts;

	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (expired(rxts)) {
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
		}
	}
}
/* synchronize the phyters so they act as one clock */

static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
	int val;

	phy_write(phydev, PAGESEL, 0);
	val = phy_read(phydev, PHYCR2);
	if (on)
		val |= BC_WRITE;
	else
		val &= ~BC_WRITE;
	phy_write(phydev, PHYCR2, val);
	phy_write(phydev, PAGESEL, init_page);
}
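/*
 * PHYCR2 carries the broadcast write enable: with it set, a write to MDIO
 * address 31 (BROADCAST_ADDR) is accepted by every DP83640 on the bus,
 * which is what lets tdr_write() and ext_write() program or step all of
 * the attached phyters in one operation.
 */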
static void recalibrate(struct dp83640_clock *clock)
{
	s64 now, diff;
	struct phy_txts event_ts;
	struct timespec64 ts;
	struct list_head *this;
	struct dp83640_private *tmp;
	struct phy_device *master = clock->chosen->phydev;
	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;

	trigger = CAL_TRIGGER;
	cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
	if (cal_gpio < 1) {
		pr_err("PHY calibration pin not available - PHY is not calibrated.");
		return;
	}

	mutex_lock(&clock->extreg_lock);

	/*
	 * enable broadcast, disable status frames, enable ptp clock
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		enable_broadcast(tmp->phydev, clock->page, 1);
		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
		ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	}
	enable_broadcast(master, clock->page, 1);
	cfg0 = ext_read(master, PAGE5, PSF_CFG0);
	ext_write(0, master, PAGE5, PSF_CFG0, 0);
	ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);

	/*
	 * enable an event timestamp
	 */
	evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
	}
	ext_write(0, master, PAGE5, PTP_EVNT, evnt);

	/*
	 * configure a trigger
	 */
	ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
	ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
	ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
	ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_LOAD;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* disable trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_DIS;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/*
	 * read out and correct offsets
	 */
	val = ext_read(master, PAGE4, PTP_STS);
	pr_info("master PTP_STS  0x%04hx\n", val);
	val = ext_read(master, PAGE4, PTP_ESTS);
	pr_info("master PTP_ESTS 0x%04hx\n", val);
	event_ts.ns_lo  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.ns_hi  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
	now = phy2txts(&event_ts);

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
		pr_info("slave  PTP_STS  0x%04hx\n", val);
		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
		pr_info("slave  PTP_ESTS 0x%04hx\n", val);
		event_ts.ns_lo  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.ns_hi  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		diff = now - (s64) phy2txts(&event_ts);
		pr_info("slave offset %lld nanoseconds\n", diff);
		diff += ADJTIME_FIX;
		ts = ns_to_timespec64(diff);
		tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
	}

	/*
	 * restore status frames
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
	}
	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);

	mutex_unlock(&clock->extreg_lock);
}
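/*
 * Calibration works by having every phyter latch an event timestamp off
 * the same edge: the PHYSYNC pin (presumably wired in common on the board)
 * is armed as a single-shot event input on each PHY, the chosen "master"
 * fires one pulse on it via a trigger, and each slave's captured time is
 * then compared with the master's and stepped by the difference with
 * PTP_STEP_CLK.
 */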
/* time stamping methods */

static inline u16 exts_chan_to_edata(int ch)
{
	return 1 << ((ch + EXT_EVENT) * 2);
}
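/*
 * The "* 2" suggests each event occupies a two bit field in the extended
 * event status word; this helper selects the low bit of that pair for
 * external timestamp channel ch.
 */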
static int decode_evnt(struct dp83640_private *dp83640,
		       void *data, int len, u16 ests)
{
	struct phy_txts *phy_txts;
	struct ptp_clock_event event;
	int i, parsed;
	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
	u16 ext_status = 0;

	/* calculate length of the event timestamp status message */
	if (ests & MULT_EVNT)
		parsed = (words + 2) * sizeof(u16);
	else
		parsed = (words + 1) * sizeof(u16);

	/* check if enough data is available */
	if (len < parsed)
		return len;

	if (ests & MULT_EVNT) {
		ext_status = *(u16 *) data;
		data += sizeof(ext_status);
	}

	phy_txts = data;

	switch (words) { /* fall through in every case */
	case 3:
		dp83640->edata.sec_hi = phy_txts->sec_hi;
	case 2:
		dp83640->edata.sec_lo = phy_txts->sec_lo;
	case 1:
		dp83640->edata.ns_hi = phy_txts->ns_hi;
	case 0:
		dp83640->edata.ns_lo = phy_txts->ns_lo;
	}

	if (!ext_status) {
		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
		ext_status = exts_chan_to_edata(i);
	}

	event.type = PTP_CLOCK_EXTTS;
	event.timestamp = phy2txts(&dp83640->edata);

	/* Compensate for input path and synchronization delays */
	event.timestamp -= 35;

	for (i = 0; i < N_EXT_TS; i++) {
		if (ext_status & exts_chan_to_edata(i)) {
			event.index = i;
			ptp_clock_event(dp83640->clock->ptp_clock, &event);
		}
	}

	return parsed;
}
#define DP83640_PACKET_HASH_OFFSET	20
#define DP83640_PACKET_HASH_LEN		10

static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
	u16 *seqid, hash;
	unsigned int offset = 0;
	u8 *msgtype, *data = skb_mac_header(skb);

	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
		return 0;

	if (unlikely(type & PTP_CLASS_V1))
		msgtype = data + offset + OFF_PTP_CONTROL;
	else
		msgtype = data + offset;
	if (rxts->msgtype != (*msgtype & 0xf))
		return 0;

	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
	if (rxts->seqid != ntohs(*seqid))
		return 0;

	hash = ether_crc(DP83640_PACKET_HASH_LEN,
			 data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
	if (rxts->hash != hash)
		return 0;

	return 1;
}
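/*
 * A receive timestamp only identifies its packet indirectly: the PHY
 * reports the PTP sequenceId, the messageType, and a 12 bit hash of the
 * DP83640_PACKET_HASH_LEN bytes starting at DP83640_PACKET_HASH_OFFSET in
 * the PTP header.  match() recomputes that hash with ether_crc() and
 * requires all three fields to agree before pairing a timestamp with an
 * skb.
 */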
static void decode_rxts(struct dp83640_private *dp83640,
			struct phy_rxts *phy_rxts)
{
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct sk_buff *skb;
	unsigned long flags;
	u8 overflow;

	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
	if (overflow)
		pr_debug("rx timestamp queue overflow, count %d\n", overflow);

	spin_lock_irqsave(&dp83640->rx_lock, flags);

	prune_rx_ts(dp83640);

	if (list_empty(&dp83640->rxpool)) {
		pr_debug("rx timestamp pool is empty\n");
		goto out;
	}
	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
	list_del_init(&rxts->list);
	phy2rxts(phy_rxts, rxts);

	spin_lock(&dp83640->rx_queue.lock);
	skb_queue_walk(&dp83640->rx_queue, skb) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (match(skb, skb_info->ptp_type, rxts)) {
			__skb_unlink(skb, &dp83640->rx_queue);
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			netif_rx_ni(skb);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock(&dp83640->rx_queue.lock);

	if (!shhwtstamps)
		list_add_tail(&rxts->list, &dp83640->rxts);
out:
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static void decode_txts(struct dp83640_private *dp83640,
			struct phy_txts *phy_txts)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	u64 ns;
	u8 overflow;

	/* We must already have the skb that triggered this. */

	skb = skb_dequeue(&dp83640->tx_queue);
	if (!skb) {
		pr_debug("have timestamp but tx_queue empty\n");
		return;
	}

	overflow = (phy_txts->ns_hi >> 14) & 0x3;
	if (overflow) {
		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
		while (skb) {
			skb_complete_tx_timestamp(skb, NULL);
			skb = skb_dequeue(&dp83640->tx_queue);
		}
		return;
	}

	ns = phy2txts(phy_txts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_complete_tx_timestamp(skb, &shhwtstamps);
}
static void decode_status_frame(struct dp83640_private *dp83640,
				struct sk_buff *skb)
{
	struct phy_rxts *phy_rxts;
	struct phy_txts *phy_txts;
	u8 *ptr;
	int len, size;
	u16 ests, type;

	ptr = skb->data + 2;

	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {

		type = *(u16 *)ptr;
		ests = type & 0x0fff;
		type = type & 0xf000;
		len -= sizeof(type);
		ptr += sizeof(type);

		if (PSF_RX == type && len >= sizeof(*phy_rxts)) {

			phy_rxts = (struct phy_rxts *) ptr;
			decode_rxts(dp83640, phy_rxts);
			size = sizeof(*phy_rxts);

		} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {

			phy_txts = (struct phy_txts *) ptr;
			decode_txts(dp83640, phy_txts);
			size = sizeof(*phy_txts);

		} else if (PSF_EVNT == type) {

			size = decode_evnt(dp83640, ptr, len, ests);

		} else {
			size = 0;
		}
		ptr += size;
	}
}
static int is_sync(struct sk_buff *skb, int type)
{
	u8 *data = skb->data, *msgtype;
	unsigned int offset = 0;

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (type & PTP_CLASS_V1)
		offset += OFF_PTP_CONTROL;

	if (skb->len < offset + 1)
		return 0;

	msgtype = data + offset;

	return (*msgtype & 0xf) == 0;
}
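/*
 * A messageType (or v1 control field) of zero identifies a PTP Sync
 * message, so is_sync() is what lets the one-step mode in
 * dp83640_txtstamp() skip software timestamping of Sync frames, which the
 * PHY stamps in-line.
 */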
static void dp83640_free_clocks(void)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;

	mutex_lock(&phyter_clocks_lock);

	list_for_each_safe(this, next, &phyter_clocks) {
		clock = list_entry(this, struct dp83640_clock, list);
		if (!list_empty(&clock->phylist)) {
			pr_warn("phy list non-empty while unloading\n");
			BUG();
		}
		list_del(&clock->list);
		mutex_destroy(&clock->extreg_lock);
		mutex_destroy(&clock->clock_lock);
		put_device(&clock->bus->dev);
		kfree(clock->caps.pin_config);
		kfree(clock);
	}

	mutex_unlock(&phyter_clocks_lock);
}
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
	INIT_LIST_HEAD(&clock->list);
	clock->bus = bus;
	mutex_init(&clock->extreg_lock);
	mutex_init(&clock->clock_lock);
	INIT_LIST_HEAD(&clock->phylist);
	clock->caps.owner = THIS_MODULE;
	sprintf(clock->caps.name, "dp83640 timer");
	clock->caps.max_adj	= 1953124;
	clock->caps.n_alarm	= 0;
	clock->caps.n_ext_ts	= N_EXT_TS;
	clock->caps.n_per_out	= N_PER_OUT;
	clock->caps.n_pins	= DP83640_N_PINS;
	clock->caps.pps		= 0;
	clock->caps.adjfreq	= ptp_dp83640_adjfreq;
	clock->caps.adjtime	= ptp_dp83640_adjtime;
	clock->caps.gettime64	= ptp_dp83640_gettime;
	clock->caps.settime64	= ptp_dp83640_settime;
	clock->caps.enable	= ptp_dp83640_enable;
	clock->caps.verify	= ptp_dp83640_verify;
	/*
	 * Convert the module param defaults into a dynamic pin configuration.
	 */
	dp83640_gpio_defaults(clock->caps.pin_config);
	/*
	 * Get a reference to this bus instance.
	 */
	get_device(&bus->dev);
}
static int choose_this_phy(struct dp83640_clock *clock,
			   struct phy_device *phydev)
{
	if (chosen_phy == -1 && !clock->chosen)
		return 1;

	if (chosen_phy == phydev->mdio.addr)
		return 1;

	return 0;
}
static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
{
	if (clock)
		mutex_lock(&clock->clock_lock);
	return clock;
}
/*
 * Look up and lock a clock by bus instance.
 * If there is no clock for this bus, then create it first.
 */
static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
{
	struct dp83640_clock *clock = NULL, *tmp;
	struct list_head *this;

	mutex_lock(&phyter_clocks_lock);

	list_for_each(this, &phyter_clocks) {
		tmp = list_entry(this, struct dp83640_clock, list);
		if (tmp->bus == bus) {
			clock = tmp;
			break;
		}
	}
	if (clock)
		goto out;

	clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
	if (!clock)
		goto out;

	clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
					 DP83640_N_PINS, GFP_KERNEL);
	if (!clock->caps.pin_config) {
		kfree(clock);
		clock = NULL;
		goto out;
	}
	dp83640_clock_init(clock, bus);
	list_add_tail(&phyter_clocks, &clock->list);
out:
	mutex_unlock(&phyter_clocks_lock);

	return dp83640_clock_get(clock);
}
static void dp83640_clock_put(struct dp83640_clock *clock)
{
	mutex_unlock(&clock->clock_lock);
}
static int dp83640_probe(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct dp83640_private *dp83640;
	int err = -ENOMEM, i;

	if (phydev->mdio.addr == BROADCAST_ADDR)
		return 0;

	clock = dp83640_clock_get_bus(phydev->mdio.bus);
	if (!clock)
		goto no_clock;

	dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
	if (!dp83640)
		goto no_memory;

	dp83640->phydev = phydev;
	INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);

	INIT_LIST_HEAD(&dp83640->rxts);
	INIT_LIST_HEAD(&dp83640->rxpool);
	for (i = 0; i < MAX_RXTS; i++)
		list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);

	phydev->priv = dp83640;

	spin_lock_init(&dp83640->rx_lock);
	skb_queue_head_init(&dp83640->rx_queue);
	skb_queue_head_init(&dp83640->tx_queue);

	dp83640->clock = clock;

	if (choose_this_phy(clock, phydev)) {
		clock->chosen = dp83640;
		clock->ptp_clock = ptp_clock_register(&clock->caps,
						      &phydev->mdio.dev);
		if (IS_ERR(clock->ptp_clock)) {
			err = PTR_ERR(clock->ptp_clock);
			goto no_register;
		}
	} else {
		list_add_tail(&dp83640->list, &clock->phylist);
	}

	dp83640_clock_put(clock);
	return 0;

no_register:
	clock->chosen = NULL;
	kfree(dp83640);
no_memory:
	dp83640_clock_put(clock);
no_clock:
	return err;
}
static void dp83640_remove(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;
	struct dp83640_private *tmp, *dp83640 = phydev->priv;

	if (phydev->mdio.addr == BROADCAST_ADDR)
		return;

	enable_status_frames(phydev, false);
	cancel_delayed_work_sync(&dp83640->ts_work);

	skb_queue_purge(&dp83640->rx_queue);
	skb_queue_purge(&dp83640->tx_queue);

	clock = dp83640_clock_get(dp83640->clock);

	if (dp83640 == clock->chosen) {
		ptp_clock_unregister(clock->ptp_clock);
		clock->chosen = NULL;
	} else {
		list_for_each_safe(this, next, &clock->phylist) {
			tmp = list_entry(this, struct dp83640_private, list);
			if (tmp == dp83640) {
				list_del_init(&tmp->list);
				break;
			}
		}
	}

	dp83640_clock_put(clock);
	kfree(dp83640);
}
static int dp83640_config_init(struct phy_device *phydev)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;

	if (clock->chosen && !list_empty(&clock->phylist))
		recalibrate(clock);
	else {
		mutex_lock(&clock->extreg_lock);
		enable_broadcast(phydev, clock->page, 1);
		mutex_unlock(&clock->extreg_lock);
	}

	enable_status_frames(phydev, true);

	mutex_lock(&clock->extreg_lock);
	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	mutex_unlock(&clock->extreg_lock);

	return 0;
}
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_DP83640_MISR);

	if (err < 0)
		return err;

	return 0;
}
static int dp83640_config_intr(struct phy_device *phydev)
{
	int micr;
	int misr;
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr |=
			(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		err = phy_write(phydev, MII_DP83640_MISR, misr);
		if (err < 0)
			return err;

		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr |=
			(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		return phy_write(phydev, MII_DP83640_MICR, micr);
	} else {
		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr &=
			~(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		err = phy_write(phydev, MII_DP83640_MICR, micr);
		if (err < 0)
			return err;

		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr &=
			~(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		return phy_write(phydev, MII_DP83640_MISR, misr);
	}
}
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct hwtstamp_config cfg;
	u16 txcfg0, rxcfg0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
		return -ERANGE;

	dp83640->hwts_tx_en = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		dp83640->hwts_rx_en = 0;
		dp83640->layer = 0;
		dp83640->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4;
		dp83640->version = PTP_CLASS_V1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4;
		dp83640->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L2;
		dp83640->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		dp83640->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
	rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;

	if (dp83640->layer & PTP_CLASS_L2) {
		txcfg0 |= TX_L2_EN;
		rxcfg0 |= RX_L2_EN;
	}
	if (dp83640->layer & PTP_CLASS_L4) {
		txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
		rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
	}

	if (dp83640->hwts_tx_en)
		txcfg0 |= TX_TS_EN;

	if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
		txcfg0 |= SYNC_1STEP | CHK_1STEP;

	if (dp83640->hwts_rx_en)
		rxcfg0 |= RX_TS_EN;

	mutex_lock(&dp83640->clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
	ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);

	mutex_unlock(&dp83640->clock->extreg_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static void rx_timestamp_work(struct work_struct *work)
{
	struct dp83640_private *dp83640 =
		container_of(work, struct dp83640_private, ts_work.work);
	struct sk_buff *skb;

	/* Deliver expired packets. */
	while ((skb = skb_dequeue(&dp83640->rx_queue))) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (!time_after(jiffies, skb_info->tmo)) {
			skb_queue_head(&dp83640->rx_queue, skb);
			break;
		}

		netif_rx_ni(skb);
	}

	if (!skb_queue_empty(&dp83640->rx_queue))
		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
}
static bool dp83640_rxtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
	struct list_head *this, *next;
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	unsigned long flags;

	if (is_status_frame(skb, type)) {
		decode_status_frame(dp83640, skb);
		kfree_skb(skb);
		return true;
	}

	if (!dp83640->hwts_rx_en)
		return false;

	if ((type & dp83640->version) == 0 || (type & dp83640->layer) == 0)
		return false;

	spin_lock_irqsave(&dp83640->rx_lock, flags);
	prune_rx_ts(dp83640);
	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (match(skb, type, rxts)) {
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			netif_rx_ni(skb);
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);

	if (!shhwtstamps) {
		skb_info->ptp_type = type;
		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
		skb_queue_tail(&dp83640->rx_queue, skb);
		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
	}

	return true;
}
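/*
 * Receive path summary: status frames are consumed here and never reach
 * the stack; PTP packets are either matched immediately against an
 * already-received timestamp or parked on rx_queue until decode_rxts()
 * finds their timestamp, with rx_timestamp_work() delivering any packet
 * whose timestamp has not arrived within SKB_TIMESTAMP_TIMEOUT jiffies.
 */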
static void dp83640_txtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;

	switch (dp83640->hwts_tx_en) {

	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		/* fall through */
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&dp83640->tx_queue, skb);
		break;

	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
{
	struct dp83640_private *dp83640 = dev->priv;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
static struct phy_driver dp83640_driver = {
	.phy_id		= DP83640_PHY_ID,
	.phy_id_mask	= 0xfffffff0,
	.name		= "NatSemi DP83640",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.probe		= dp83640_probe,
	.remove		= dp83640_remove,
	.config_init	= dp83640_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= dp83640_ack_interrupt,
	.config_intr	= dp83640_config_intr,
	.ts_info	= dp83640_ts_info,
	.hwtstamp	= dp83640_hwtstamp,
	.rxtstamp	= dp83640_rxtstamp,
	.txtstamp	= dp83640_txtstamp,
};
static int __init dp83640_init(void)
{
	return phy_driver_register(&dp83640_driver, THIS_MODULE);
}

static void __exit dp83640_exit(void)
{
	dp83640_free_clocks();
	phy_driver_unregister(&dp83640_driver);
}

MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");

module_init(dp83640_init);
module_exit(dp83640_exit);
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
	{ DP83640_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, dp83640_tbl);