/*
 * Driver for the National Semiconductor DP83640 PHYTER
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "dp83640_reg.h"
#define DP83640_PHY_ID 0x20005ce1
#define PSF_EVNT 0x4000

#define DP83640_N_PINS 12

#define MII_DP83640_MICR 0x11
#define MII_DP83640_MISR 0x12

#define MII_DP83640_MICR_OE 0x1
#define MII_DP83640_MICR_IE 0x2

#define MII_DP83640_MISR_RHF_INT_EN 0x01
#define MII_DP83640_MISR_FHF_INT_EN 0x02
#define MII_DP83640_MISR_ANC_INT_EN 0x04
#define MII_DP83640_MISR_DUP_INT_EN 0x08
#define MII_DP83640_MISR_SPD_INT_EN 0x10
#define MII_DP83640_MISR_LINK_INT_EN 0x20
#define MII_DP83640_MISR_ED_INT_EN 0x40
#define MII_DP83640_MISR_LQ_INT_EN 0x80

/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16

#define SKB_TIMESTAMP_TIMEOUT 2 /* jiffies */

#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG 0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG PSF_ENDIAN
#endif
struct dp83640_skb_info {
	int ptp_type;
	unsigned long tmo;
};

struct phy_rxts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
	u16 seqid;   /* sequenceId[15:0] */
	u16 msgtype; /* messageType[3:0], hash[11:0] */
};

struct phy_txts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
};

struct rxts {
	struct list_head list;
	unsigned long tmo;
	u64 ns;
	u16 seqid;
	u8  msgtype;
	u16 hash;
};

struct dp83640_clock;
struct dp83640_private {
	struct list_head list;
	struct dp83640_clock *clock;
	struct phy_device *phydev;
	struct delayed_work ts_work;
	int hwts_tx_en;
	int hwts_rx_en;
	int layer;
	int version;
	/* remember state of cfg0 during calibration */
	int cfg0;
	/* remember the last event time stamp */
	struct phy_txts edata;
	/* list of rx timestamps */
	struct list_head rxts;
	struct list_head rxpool;
	struct rxts rx_pool_data[MAX_RXTS];
	/* protects above three fields from concurrent access */
	spinlock_t rx_lock;
	/* queues of incoming and outgoing packets */
	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;
};
struct dp83640_clock {
	/* keeps the instance in the 'phyter_clocks' list */
	struct list_head list;
	/* we create one clock instance per MII bus */
	struct mii_bus *bus;
	/* protects extended registers from concurrent access */
	struct mutex extreg_lock;
	/* remembers which page was last selected */
	int page;
	/* our advertised capabilities */
	struct ptp_clock_info caps;
	/* protects the three fields below from concurrent access */
	struct mutex clock_lock;
	/* the one phyter from which we shall read */
	struct dp83640_private *chosen;
	/* list of the other attached phyters, not chosen */
	struct list_head phylist;
	/* reference to our PTP hardware clock */
	struct ptp_clock *ptp_clock;
};
static int chosen_phy = -1;
static ushort gpio_tab[GPIO_TABLE_SIZE] = {
	1, 2, 3, 4, 8, 9, 10, 11
};

module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);

MODULE_PARM_DESC(chosen_phy, \
	"The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(gpio_tab, \
	"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
{
	int i, index;

	for (i = 0; i < DP83640_N_PINS; i++) {
		snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
	}

	for (i = 0; i < GPIO_TABLE_SIZE; i++) {
		if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
			pr_err("gpio_tab[%d]=%hu out of range", i, gpio_tab[i]);
			return;
		}
	}

	index = gpio_tab[CALIBRATE_GPIO] - 1;
	pd[index].func = PTP_PF_PHYSYNC;

	index = gpio_tab[PEROUT_GPIO] - 1;
	pd[index].func = PTP_PF_PEROUT;

	for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
		index = gpio_tab[i] - 1;
		pd[index].func = PTP_PF_EXTTS;
		pd[index].chan = i - EXTTS0_GPIO;
	}
}
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);

static void rx_timestamp_work(struct work_struct *work);
/* extended register access functions */

#define BROADCAST_ADDR 31

static inline int broadcast_write(struct phy_device *phydev, u32 regnum,
				  u16 val)
{
	return mdiobus_write(phydev->mdio.bus, BROADCAST_ADDR, regnum, val);
}
/* Caller must hold extreg_lock. */
static int ext_read(struct phy_device *phydev, int page, u32 regnum)
{
	struct dp83640_private *dp83640 = phydev->priv;
	int val;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev, PAGESEL, page);
		dp83640->clock->page = page;
	}
	val = phy_read(phydev, regnum);

	return val;
}
/* Caller must hold extreg_lock. */
static void ext_write(int broadcast, struct phy_device *phydev,
		      int page, u32 regnum, u16 val)
{
	struct dp83640_private *dp83640 = phydev->priv;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev, PAGESEL, page);
		dp83640->clock->page = page;
	}
	if (broadcast)
		broadcast_write(phydev, regnum, val);
	else
		phy_write(phydev, regnum, val);
}
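/*
 * Note on extended register access: the PTP registers live on numbered
 * register pages selected through PAGESEL.  The most recently selected
 * page is cached in clock->page, and page selects are issued as broadcast
 * writes (MDIO address 31) so that every phyter sharing the bus stays on
 * the same page.  Passing a non-zero 'broadcast' to ext_write() likewise
 * updates all attached phyters in a single bus transaction.
 */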
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
		     const struct timespec64 *ts, u16 cmd)
{
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0]  */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);   /* ns[31:16] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);    /* sec[31:16] */

	ext_write(bc, dev, PAGE4, PTP_CTL, cmd);

	return 0;
}
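/*
 * The 64 bit time value is staged into the time data register sixteen bits
 * at a time, low nanosecond word first and high second word last.  The
 * concluding write to PTP_CTL carries the command (PTP_LOAD_CLK or
 * PTP_STEP_CLK in the callers below) that consumes the staged value.
 */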
/* convert phy timestamps into driver timestamps */

static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
{
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	rxts->ns = p->ns_lo;
	rxts->ns |= (p->ns_hi & 0x3fff) << 16;
	rxts->ns += ((u64)sec) * 1000000000ULL;
	rxts->seqid = p->seqid;
	rxts->msgtype = (p->msgtype >> 12) & 0xf;
	rxts->hash = p->msgtype & 0x0fff;
	rxts->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
}
static u64 phy2txts(struct phy_txts *p)
{
	u64 ns;
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	ns = p->ns_lo;
	ns |= (p->ns_hi & 0x3fff) << 16;
	ns += ((u64)sec) * 1000000000ULL;

	return ns;
}
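/*
 * The phyter reports time stamps as 16 bit words: a 32 bit seconds field
 * and a 30 bit nanosecond field, with the top two bits of ns_hi carrying
 * an overflow counter.  Both helpers above fold this into a single u64
 * nanosecond value; for example sec = 5, ns = 1000 yields 5000001000 ns.
 */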
static int periodic_output(struct dp83640_clock *clock,
			   struct ptp_clock_request *clkreq, bool on,
			   int trigger)
{
	struct dp83640_private *dp83640 = clock->chosen;
	struct phy_device *phydev = dp83640->phydev;
	u32 sec, nsec, pwidth;
	u16 gpio, ptp_trig, val;

	if (on) {
		gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
					trigger);
		if (gpio < 1)
			return -EINVAL;
	} else {
		gpio = 0;
	}

	ptp_trig = TRIG_WR |
		(trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
		(gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
		TRIG_PER |
		TRIG_PULSE;

	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;

	if (!on) {
		val |= TRIG_DIS;
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
		ext_write(0, phydev, PAGE4, PTP_CTL, val);
		mutex_unlock(&clock->extreg_lock);
		return 0;
	}

	sec = clkreq->perout.start.sec;
	nsec = clkreq->perout.start.nsec;
	pwidth = clkreq->perout.period.sec * 1000000000UL;
	pwidth += clkreq->perout.period.nsec;
	pwidth /= 2;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val |= TRIG_LOAD;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
	/* Triggers 0 and 1 have a programmable pulsewidth2 */
	if (trigger < 2) {
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff);
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);
	}

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);

	mutex_unlock(&clock->extreg_lock);
	return 0;
}
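/*
 * Summary of the periodic output path above: the trigger/GPIO selection is
 * packed into PTP_TRIG, the start time and period are staged 16 bits at a
 * time through PTP_TDR, and the trigger is finally (re)enabled through
 * PTP_CTL; when 'on' is false only the disable sequence is issued.
 */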
/* ptp clock methods */

static int ptp_dp83640_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	u64 rate;
	int neg_adj = 0;
	u16 hi, lo;

	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}
	rate = scaled_ppm;
	rate <<= 13;
	rate = div_u64(rate, 15625);

	hi = (rate >> 16) & PTP_RATE_HI_MASK;
	if (neg_adj)
		hi |= PTP_RATE_DIR;

	lo = rate & 0xffff;

	mutex_lock(&clock->extreg_lock);

	ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
	ext_write(1, phydev, PAGE4, PTP_RATEL, lo);

	mutex_unlock(&clock->extreg_lock);

	return 0;
}
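/*
 * scaled_ppm is the standard adjfine argument: a frequency offset in parts
 * per million with a 16 bit fractional part.  The shift and the divide by
 * 15625 above rescale that value into the fixed point units expected by
 * the PTP_RATEH/PTP_RATEL registers, with the sign carried separately.
 */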
static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	struct timespec64 ts;
	int err;

	delta += ADJTIME_FIX;

	ts = ns_to_timespec64(delta);

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int val[4];

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);

	val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
	val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
	val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
	val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */

	mutex_unlock(&clock->extreg_lock);

	ts->tv_nsec = val[0] | (val[1] << 16);
	ts->tv_sec = val[2] | (val[3] << 16);

	return 0;
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	int err;

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int index;
	u16 evnt, event_num, gpio_num;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		index = rq->extts.index;
		if (index >= N_EXT_TS)
			return -EINVAL;
		event_num = EXT_EVENT + index;
		evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
		if (on) {
			gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
						    PTP_PF_EXTTS, index);
			if (gpio_num < 1)
				return -EINVAL;
			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
			if (rq->extts.flags & PTP_FALLING_EDGE)
				evnt |= EVNT_FALL;
			else
				evnt |= EVNT_RISE;
		}
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
		mutex_unlock(&clock->extreg_lock);
		return 0;

	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index >= N_PER_OUT)
			return -EINVAL;
		return periodic_output(clock, rq, on, rq->perout.index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
			      enum ptp_pin_function func, unsigned int chan)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);

	if (clock->caps.pin_config[pin].func == PTP_PF_PHYSYNC &&
	    !list_empty(&clock->phylist))
		return 1;

	if (func == PTP_PF_PHYSYNC)
		return 1;

	return 0;
}

static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };

static void enable_status_frames(struct phy_device *phydev, bool on)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;
	u16 cfg0 = 0, ver;

	if (on)
		cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;

	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);

	mutex_unlock(&clock->extreg_lock);

	if (!phydev->attached_dev) {
		pr_warn("expected to find an attached netdevice\n");
		return;
	}

	if (on) {
		if (dev_mc_add(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to add mc address\n");
	} else {
		if (dev_mc_del(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to delete mc address\n");
	}
}
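/*
 * The phyter delivers its timestamps in-band as layer 2 "status frames".
 * enable_status_frames() subscribes the attached netdevice to the multicast
 * destination used for those frames, and is_status_frame() below recognizes
 * them by their reserved source MAC address so that the receive path can
 * decode them itself.
 */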
static bool is_status_frame(struct sk_buff *skb, int type)
{
	struct ethhdr *h = eth_hdr(skb);

	if (PTP_CLASS_V2_L2 == type &&
	    !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
		return true;
	else
		return false;
}

static int expired(struct rxts *rxts)
{
	return time_after(jiffies, rxts->tmo);
}

/* Caller must hold rx_lock. */
static void prune_rx_ts(struct dp83640_private *dp83640)
{
	struct list_head *this, *next;
	struct rxts *rxts;

	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (expired(rxts)) {
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
		}
	}
}

/* synchronize the phyters so they act as one clock */

static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
	int val;

	phy_write(phydev, PAGESEL, 0);
	val = phy_read(phydev, PHYCR2);
	if (on)
		val |= BC_WRITE;
	else
		val &= ~BC_WRITE;
	phy_write(phydev, PHYCR2, val);
	phy_write(phydev, PAGESEL, init_page);
}
static void recalibrate(struct dp83640_clock *clock)
{
	s64 now, diff;
	struct phy_txts event_ts;
	struct timespec64 ts;
	struct list_head *this;
	struct dp83640_private *tmp;
	struct phy_device *master = clock->chosen->phydev;
	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;

	trigger = CAL_TRIGGER;
	cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
	if (cal_gpio < 1) {
		pr_err("PHY calibration pin not available - PHY is not calibrated.");
		return;
	}

	mutex_lock(&clock->extreg_lock);

	/*
	 * enable broadcast, disable status frames, enable ptp clock
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		enable_broadcast(tmp->phydev, clock->page, 1);
		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
		ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	}
	enable_broadcast(master, clock->page, 1);
	cfg0 = ext_read(master, PAGE5, PSF_CFG0);
	ext_write(0, master, PAGE5, PSF_CFG0, 0);
	ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);

	/*
	 * enable an event timestamp
	 */
	evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
	}
	ext_write(0, master, PAGE5, PTP_EVNT, evnt);

	/*
	 * configure a trigger
	 */
	ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
	ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
	ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
	ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_LOAD;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* disable trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_DIS;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/*
	 * read out and correct offsets
	 */
	val = ext_read(master, PAGE4, PTP_STS);
	pr_info("master PTP_STS 0x%04hx\n", val);
	val = ext_read(master, PAGE4, PTP_ESTS);
	pr_info("master PTP_ESTS 0x%04hx\n", val);
	event_ts.ns_lo  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.ns_hi  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
	now = phy2txts(&event_ts);

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
		pr_info("slave PTP_STS 0x%04hx\n", val);
		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
		pr_info("slave PTP_ESTS 0x%04hx\n", val);
		event_ts.ns_lo  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.ns_hi  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		diff = now - (s64) phy2txts(&event_ts);
		pr_info("slave offset %lld nanoseconds\n", diff);
		diff += ADJTIME_FIX;
		ts = ns_to_timespec64(diff);
		tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
	}

	/*
	 * restore status frames
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
	}
	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);

	mutex_unlock(&clock->extreg_lock);
}
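/*
 * Note on recalibrate(): when several phyters share one MII bus, the chosen
 * "master" PHY fires a pulse on the shared PTP_PF_PHYSYNC pin, every PHY
 * captures an event timestamp of that same edge, and each slave clock is
 * then stepped by its measured offset from the master so the group behaves
 * as a single clock.
 */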
/* time stamping methods */

static inline u16 exts_chan_to_edata(int ch)
{
	return 1 << ((ch + EXT_EVENT) * 2);
}
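/*
 * Event status messages carry a word count in 'ests'; when MULT_EVNT is
 * set, an extra leading word holds a bitmap of the external timestamp
 * channels (two bits per event, as computed by exts_chan_to_edata()) that
 * share the time stamp words which follow.
 */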
static int decode_evnt(struct dp83640_private *dp83640,
		       void *data, int len, u16 ests)
{
	struct phy_txts *phy_txts;
	struct ptp_clock_event event;
	int i, parsed;
	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
	u16 ext_status = 0;

	/* calculate length of the event timestamp status message */
	if (ests & MULT_EVNT)
		parsed = (words + 2) * sizeof(u16);
	else
		parsed = (words + 1) * sizeof(u16);

	/* check if enough data is available */
	if (len < parsed)
		return len;

	if (ests & MULT_EVNT) {
		ext_status = *(u16 *) data;
		data += sizeof(ext_status);
	}

	phy_txts = data;

	switch (words) { /* fall through in every case */
	case 3:
		dp83640->edata.sec_hi = phy_txts->sec_hi;
	case 2:
		dp83640->edata.sec_lo = phy_txts->sec_lo;
	case 1:
		dp83640->edata.ns_hi = phy_txts->ns_hi;
	case 0:
		dp83640->edata.ns_lo = phy_txts->ns_lo;
	}

	if (!ext_status) {
		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
		ext_status = exts_chan_to_edata(i);
	}

	event.type = PTP_CLOCK_EXTTS;
	event.timestamp = phy2txts(&dp83640->edata);

	/* Compensate for input path and synchronization delays */
	event.timestamp -= 35;

	for (i = 0; i < N_EXT_TS; i++) {
		if (ext_status & exts_chan_to_edata(i)) {
			event.index = i;
			ptp_clock_event(dp83640->clock->ptp_clock, &event);
		}
	}

	return parsed;
}
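/*
 * A receive timestamp is tied to its packet by three keys reported in the
 * status frame: the PTP sequenceId, the 4 bit messageType, and a 12 bit
 * hash over bytes 20-29 of the PTP header.  match() below recomputes that
 * hash with ether_crc() and compares all three before accepting a match.
 */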
#define DP83640_PACKET_HASH_OFFSET 20
#define DP83640_PACKET_HASH_LEN 10

static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
	u16 *seqid, hash;
	unsigned int offset = 0;
	u8 *msgtype, *data = skb_mac_header(skb);

	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
		return 0;

	if (unlikely(type & PTP_CLASS_V1))
		msgtype = data + offset + OFF_PTP_CONTROL;
	else
		msgtype = data + offset;
	if (rxts->msgtype != (*msgtype & 0xf))
		return 0;

	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
	if (rxts->seqid != ntohs(*seqid))
		return 0;

	hash = ether_crc(DP83640_PACKET_HASH_LEN,
			 data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
	if (rxts->hash != hash)
		return 0;

	return 1;
}
static void decode_rxts(struct dp83640_private *dp83640,
			struct phy_rxts *phy_rxts)
{
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct sk_buff *skb;
	unsigned long flags;
	u8 overflow;

	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
	if (overflow)
		pr_debug("rx timestamp queue overflow, count %d\n", overflow);

	spin_lock_irqsave(&dp83640->rx_lock, flags);

	prune_rx_ts(dp83640);

	if (list_empty(&dp83640->rxpool)) {
		pr_debug("rx timestamp pool is empty\n");
		goto out;
	}
	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
	list_del_init(&rxts->list);
	phy2rxts(phy_rxts, rxts);

	spin_lock(&dp83640->rx_queue.lock);
	skb_queue_walk(&dp83640->rx_queue, skb) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (match(skb, skb_info->ptp_type, rxts)) {
			__skb_unlink(skb, &dp83640->rx_queue);
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock(&dp83640->rx_queue.lock);

	if (!shhwtstamps)
		list_add_tail(&rxts->list, &dp83640->rxts);
out:
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);

	if (shhwtstamps)
		netif_rx_ni(skb);
}
static void decode_txts(struct dp83640_private *dp83640,
			struct phy_txts *phy_txts)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	u64 ns;
	u8 overflow;

	/* We must already have the skb that triggered this. */

	skb = skb_dequeue(&dp83640->tx_queue);

	if (!skb) {
		pr_debug("have timestamp but tx_queue empty\n");
		return;
	}

	overflow = (phy_txts->ns_hi >> 14) & 0x3;
	if (overflow) {
		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
		while (skb) {
			kfree_skb(skb);
			skb = skb_dequeue(&dp83640->tx_queue);
		}
		return;
	}

	ns = phy2txts(phy_txts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_complete_tx_timestamp(skb, &shhwtstamps);
}
static void decode_status_frame(struct dp83640_private *dp83640,
				struct sk_buff *skb)
{
	struct phy_rxts *phy_rxts;
	struct phy_txts *phy_txts;
	u8 *ptr;
	int len, size;
	u16 ests, type;

	ptr = skb->data + 2;

	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {

		type = *(u16 *)ptr;
		ests = type & 0x0fff;
		type = type & 0xf000;
		len -= sizeof(type);
		ptr += sizeof(type);

		if (PSF_RX == type && len >= sizeof(*phy_rxts)) {

			phy_rxts = (struct phy_rxts *) ptr;
			decode_rxts(dp83640, phy_rxts);
			size = sizeof(*phy_rxts);

		} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {

			phy_txts = (struct phy_txts *) ptr;
			decode_txts(dp83640, phy_txts);
			size = sizeof(*phy_txts);

		} else if (PSF_EVNT == type) {

			size = decode_evnt(dp83640, ptr, len, ests);

		} else {
			size = 0;
		}
		ptr += size;
	}
}
*skb
, int type
)
970 u8
*data
= skb
->data
, *msgtype
;
971 unsigned int offset
= 0;
973 if (type
& PTP_CLASS_VLAN
)
976 switch (type
& PTP_CLASS_PMASK
) {
978 offset
+= ETH_HLEN
+ IPV4_HLEN(data
+ offset
) + UDP_HLEN
;
981 offset
+= ETH_HLEN
+ IP6_HLEN
+ UDP_HLEN
;
990 if (type
& PTP_CLASS_V1
)
991 offset
+= OFF_PTP_CONTROL
;
993 if (skb
->len
< offset
+ 1)
996 msgtype
= data
+ offset
;
998 return (*msgtype
& 0xf) == 0;
1001 static void dp83640_free_clocks(void)
1003 struct dp83640_clock
*clock
;
1004 struct list_head
*this, *next
;
1006 mutex_lock(&phyter_clocks_lock
);
1008 list_for_each_safe(this, next
, &phyter_clocks
) {
1009 clock
= list_entry(this, struct dp83640_clock
, list
);
1010 if (!list_empty(&clock
->phylist
)) {
1011 pr_warn("phy list non-empty while unloading\n");
1014 list_del(&clock
->list
);
1015 mutex_destroy(&clock
->extreg_lock
);
1016 mutex_destroy(&clock
->clock_lock
);
1017 put_device(&clock
->bus
->dev
);
1018 kfree(clock
->caps
.pin_config
);
1022 mutex_unlock(&phyter_clocks_lock
);
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
	INIT_LIST_HEAD(&clock->list);
	clock->bus = bus;
	mutex_init(&clock->extreg_lock);
	mutex_init(&clock->clock_lock);
	INIT_LIST_HEAD(&clock->phylist);
	clock->caps.owner = THIS_MODULE;
	sprintf(clock->caps.name, "dp83640 timer");
	clock->caps.max_adj	= 1953124;
	clock->caps.n_alarm	= 0;
	clock->caps.n_ext_ts	= N_EXT_TS;
	clock->caps.n_per_out	= N_PER_OUT;
	clock->caps.n_pins	= DP83640_N_PINS;
	clock->caps.pps		= 0;
	clock->caps.adjfine	= ptp_dp83640_adjfine;
	clock->caps.adjtime	= ptp_dp83640_adjtime;
	clock->caps.gettime64	= ptp_dp83640_gettime;
	clock->caps.settime64	= ptp_dp83640_settime;
	clock->caps.enable	= ptp_dp83640_enable;
	clock->caps.verify	= ptp_dp83640_verify;
	/*
	 * Convert the module param defaults into a dynamic pin configuration.
	 */
	dp83640_gpio_defaults(clock->caps.pin_config);
	/*
	 * Get a reference to this bus instance.
	 */
	get_device(&bus->dev);
}
static int choose_this_phy(struct dp83640_clock *clock,
			   struct phy_device *phydev)
{
	if (chosen_phy == -1 && !clock->chosen)
		return 1;

	if (chosen_phy == phydev->mdio.addr)
		return 1;

	return 0;
}

static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
{
	if (clock)
		mutex_lock(&clock->clock_lock);
	return clock;
}

/*
 * Look up and lock a clock by bus instance.
 * If there is no clock for this bus, then create it first.
 */
static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
{
	struct dp83640_clock *clock = NULL, *tmp;
	struct list_head *this;

	mutex_lock(&phyter_clocks_lock);

	list_for_each(this, &phyter_clocks) {
		tmp = list_entry(this, struct dp83640_clock, list);
		if (tmp->bus == bus) {
			clock = tmp;
			break;
		}
	}
	if (clock)
		goto out;

	clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
	if (!clock)
		goto out;

	clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
					 DP83640_N_PINS, GFP_KERNEL);
	if (!clock->caps.pin_config) {
		kfree(clock);
		clock = NULL;
		goto out;
	}
	dp83640_clock_init(clock, bus);
	list_add_tail(&phyter_clocks, &clock->list);
out:
	mutex_unlock(&phyter_clocks_lock);

	return dp83640_clock_get(clock);
}

static void dp83640_clock_put(struct dp83640_clock *clock)
{
	mutex_unlock(&clock->clock_lock);
}
static int dp83640_probe(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct dp83640_private *dp83640;
	int err = -ENOMEM, i;

	if (phydev->mdio.addr == BROADCAST_ADDR)
		return 0;

	clock = dp83640_clock_get_bus(phydev->mdio.bus);
	if (!clock)
		goto no_clock;

	dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
	if (!dp83640)
		goto no_memory;

	dp83640->phydev = phydev;
	INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);

	INIT_LIST_HEAD(&dp83640->rxts);
	INIT_LIST_HEAD(&dp83640->rxpool);
	for (i = 0; i < MAX_RXTS; i++)
		list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);

	phydev->priv = dp83640;

	spin_lock_init(&dp83640->rx_lock);
	skb_queue_head_init(&dp83640->rx_queue);
	skb_queue_head_init(&dp83640->tx_queue);

	dp83640->clock = clock;

	if (choose_this_phy(clock, phydev)) {
		clock->chosen = dp83640;
		clock->ptp_clock = ptp_clock_register(&clock->caps,
						      &phydev->mdio.dev);
		if (IS_ERR(clock->ptp_clock)) {
			err = PTR_ERR(clock->ptp_clock);
			goto no_register;
		}
	} else
		list_add_tail(&dp83640->list, &clock->phylist);

	dp83640_clock_put(clock);
	return 0;

no_register:
	clock->chosen = NULL;
	kfree(dp83640);
no_memory:
	dp83640_clock_put(clock);
no_clock:
	return err;
}
static void dp83640_remove(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;
	struct dp83640_private *tmp, *dp83640 = phydev->priv;

	if (phydev->mdio.addr == BROADCAST_ADDR)
		return;

	enable_status_frames(phydev, false);
	cancel_delayed_work_sync(&dp83640->ts_work);

	skb_queue_purge(&dp83640->rx_queue);
	skb_queue_purge(&dp83640->tx_queue);

	clock = dp83640_clock_get(dp83640->clock);

	if (dp83640 == clock->chosen) {
		ptp_clock_unregister(clock->ptp_clock);
		clock->chosen = NULL;
	} else {
		list_for_each_safe(this, next, &clock->phylist) {
			tmp = list_entry(this, struct dp83640_private, list);
			if (tmp == dp83640) {
				list_del_init(&tmp->list);
				break;
			}
		}
	}

	dp83640_clock_put(clock);
	kfree(dp83640);
}
static int dp83640_config_init(struct phy_device *phydev)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;

	if (clock->chosen && !list_empty(&clock->phylist))
		recalibrate(clock);
	else {
		mutex_lock(&clock->extreg_lock);
		enable_broadcast(phydev, clock->page, 1);
		mutex_unlock(&clock->extreg_lock);
	}

	enable_status_frames(phydev, true);

	mutex_lock(&clock->extreg_lock);
	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	mutex_unlock(&clock->extreg_lock);

	return 0;
}
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_DP83640_MISR);

	if (err < 0)
		return err;

	return 0;
}

static int dp83640_config_intr(struct phy_device *phydev)
{
	int micr;
	int misr;
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr |=
			(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		err = phy_write(phydev, MII_DP83640_MISR, misr);
		if (err < 0)
			return err;

		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr |=
			(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		return phy_write(phydev, MII_DP83640_MICR, micr);
	} else {
		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr &=
			~(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		err = phy_write(phydev, MII_DP83640_MICR, micr);
		if (err < 0)
			return err;

		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr &=
			~(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		return phy_write(phydev, MII_DP83640_MISR, misr);
	}
}
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct hwtstamp_config cfg;
	u16 txcfg0, rxcfg0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
		return -ERANGE;

	dp83640->hwts_tx_en = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		dp83640->hwts_rx_en = 0;
		dp83640->layer = 0;
		dp83640->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4;
		dp83640->version = PTP_CLASS_V1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4;
		dp83640->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L2;
		dp83640->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		dp83640->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
	rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;

	if (dp83640->layer & PTP_CLASS_L2) {
		txcfg0 |= TX_L2_EN;
		rxcfg0 |= RX_L2_EN;
	}
	if (dp83640->layer & PTP_CLASS_L4) {
		txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
		rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
	}

	if (dp83640->hwts_tx_en)
		txcfg0 |= TX_TS_EN;

	if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
		txcfg0 |= SYNC_1STEP | CHK_1STEP;

	if (dp83640->hwts_rx_en)
		rxcfg0 |= RX_TS_EN;

	mutex_lock(&dp83640->clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
	ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);

	mutex_unlock(&dp83640->clock->extreg_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
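/*
 * Receive timestamps and packets can arrive in either order.  A packet
 * whose timestamp has not shown up yet waits on rx_queue with a deadline
 * stored in its cb[]; rx_timestamp_work() below releases it to the stack
 * once SKB_TIMESTAMP_TIMEOUT jiffies have passed, while dp83640_rxtstamp()
 * stamps it immediately whenever a matching rxts entry is already queued.
 */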
static void rx_timestamp_work(struct work_struct *work)
{
	struct dp83640_private *dp83640 =
		container_of(work, struct dp83640_private, ts_work.work);
	struct sk_buff *skb;

	/* Deliver expired packets. */
	while ((skb = skb_dequeue(&dp83640->rx_queue))) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (!time_after(jiffies, skb_info->tmo)) {
			skb_queue_head(&dp83640->rx_queue, skb);
			break;
		}

		netif_rx_ni(skb);
	}

	if (!skb_queue_empty(&dp83640->rx_queue))
		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
}

static bool dp83640_rxtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
	struct list_head *this, *next;
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	unsigned long flags;

	if (is_status_frame(skb, type)) {
		decode_status_frame(dp83640, skb);
		kfree_skb(skb);
		return true;
	}

	if (!dp83640->hwts_rx_en)
		return false;

	if ((type & dp83640->version) == 0 || (type & dp83640->layer) == 0)
		return false;

	spin_lock_irqsave(&dp83640->rx_lock, flags);
	prune_rx_ts(dp83640);
	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (match(skb, type, rxts)) {
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);

	if (!shhwtstamps) {
		skb_info->ptp_type = type;
		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
		skb_queue_tail(&dp83640->rx_queue, skb);
		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
	} else {
		netif_rx_ni(skb);
	}

	return true;
}
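/*
 * Transmit side: in HWTSTAMP_TX_ONESTEP_SYNC mode the hardware inserts the
 * timestamp into outgoing Sync frames itself (SYNC_1STEP/CHK_1STEP set up
 * in dp83640_hwtstamp), so dp83640_txtstamp() below only queues the
 * remaining event packets on tx_queue to wait for their timestamps.
 */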
static void dp83640_txtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;

	switch (dp83640->hwts_tx_en) {

	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		/* fall through */
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&dp83640->tx_queue, skb);
		break;

	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
{
	struct dp83640_private *dp83640 = dev->priv;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
static struct phy_driver dp83640_driver = {
	.phy_id		= DP83640_PHY_ID,
	.phy_id_mask	= 0xfffffff0,
	.name		= "NatSemi DP83640",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.probe		= dp83640_probe,
	.remove		= dp83640_remove,
	.config_init	= dp83640_config_init,
	.ack_interrupt	= dp83640_ack_interrupt,
	.config_intr	= dp83640_config_intr,
	.ts_info	= dp83640_ts_info,
	.hwtstamp	= dp83640_hwtstamp,
	.rxtstamp	= dp83640_rxtstamp,
	.txtstamp	= dp83640_txtstamp,
};
static int __init dp83640_init(void)
{
	return phy_driver_register(&dp83640_driver, THIS_MODULE);
}

static void __exit dp83640_exit(void)
{
	dp83640_free_clocks();
	phy_driver_unregister(&dp83640_driver);
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");

module_init(dp83640_init);
module_exit(dp83640_exit);
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
	{ DP83640_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, dp83640_tbl);