/*
 * Driver for the National Semiconductor DP83640 PHYTER
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "dp83640_reg.h"
#define DP83640_PHY_ID		0x20005ce1
#define PSF_EVNT		0x4000
#define DP83640_N_PINS		12

#define MII_DP83640_MICR	0x11
#define MII_DP83640_MISR	0x12

#define MII_DP83640_MICR_OE	0x1
#define MII_DP83640_MICR_IE	0x2

#define MII_DP83640_MISR_RHF_INT_EN	0x01
#define MII_DP83640_MISR_FHF_INT_EN	0x02
#define MII_DP83640_MISR_ANC_INT_EN	0x04
#define MII_DP83640_MISR_DUP_INT_EN	0x08
#define MII_DP83640_MISR_SPD_INT_EN	0x10
#define MII_DP83640_MISR_LINK_INT_EN	0x20
#define MII_DP83640_MISR_ED_INT_EN	0x40
#define MII_DP83640_MISR_LQ_INT_EN	0x80

/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX		16
#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG	0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG	PSF_ENDIAN
#endif
struct dp83640_skb_info {
	int ptp_type;
	unsigned long tmo;
};

struct phy_rxts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
	u16 seqid;   /* sequenceId[15:0] */
	u16 msgtype; /* messageType[3:0], hash[11:0] */
};

struct phy_txts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
};

struct rxts {
	struct list_head list;
	unsigned long tmo;
	u64 ns;
	u16 seqid;
	u8  msgtype;
	u16 hash;
};
struct dp83640_clock;
struct dp83640_private {
	struct list_head list;
	struct dp83640_clock *clock;
	struct phy_device *phydev;
	struct work_struct ts_work;
	int hwts_tx_en;
	int hwts_rx_en;
	int layer;
	int version;
	/* remember state of cfg0 during calibration */
	int cfg0;
	/* remember the last event time stamp */
	struct phy_txts edata;
	/* list of rx timestamps */
	struct list_head rxts;
	struct list_head rxpool;
	struct rxts rx_pool_data[MAX_RXTS];
	/* protects above three fields from concurrent access */
	spinlock_t rx_lock;
	/* queues of incoming and outgoing packets */
	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;
};
struct dp83640_clock {
	/* keeps the instance in the 'phyter_clocks' list */
	struct list_head list;
	/* we create one clock instance per MII bus */
	struct mii_bus *bus;
	/* protects extended registers from concurrent access */
	struct mutex extreg_lock;
	/* remembers which page was last selected */
	int page;
	/* our advertised capabilities */
	struct ptp_clock_info caps;
	/* protects the three fields below from concurrent access */
	struct mutex clock_lock;
	/* the one phyter from which we shall read */
	struct dp83640_private *chosen;
	/* list of the other attached phyters, not chosen */
	struct list_head phylist;
	/* reference to our PTP hardware clock */
	struct ptp_clock *ptp_clock;
};
static int chosen_phy = -1;
static ushort gpio_tab[GPIO_TABLE_SIZE] = {
	1, 2, 3, 4, 8, 9, 10, 11
};

module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);

MODULE_PARM_DESC(chosen_phy, \
	"The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(gpio_tab, \
	"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
{
	int i, index;

	for (i = 0; i < DP83640_N_PINS; i++) {
		snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
	}

	for (i = 0; i < GPIO_TABLE_SIZE; i++) {
		if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
			pr_err("gpio_tab[%d]=%hu out of range", i, gpio_tab[i]);
			return;
		}
	}

	index = gpio_tab[CALIBRATE_GPIO] - 1;
	pd[index].func = PTP_PF_PHYSYNC;

	index = gpio_tab[PEROUT_GPIO] - 1;
	pd[index].func = PTP_PF_PEROUT;

	for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
		index = gpio_tab[i] - 1;
		pd[index].func = PTP_PF_EXTTS;
		pd[index].chan = i - EXTTS0_GPIO;
	}
}
/* a list of clocks and a mutex to protect it */

static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);
static void rx_timestamp_work(struct work_struct *work);
/* extended register access functions */

#define BROADCAST_ADDR 31

static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
{
	return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
}
/* Caller must hold extreg_lock. */
static int ext_read(struct phy_device *phydev, int page, u32 regnum)
{
	struct dp83640_private *dp83640 = phydev->priv;
	int val;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev->bus, PAGESEL, page);
		dp83640->clock->page = page;
	}
	val = phy_read(phydev, regnum);

	return val;
}
/* Caller must hold extreg_lock. */
static void ext_write(int broadcast, struct phy_device *phydev,
		      int page, u32 regnum, u16 val)
{
	struct dp83640_private *dp83640 = phydev->priv;

	if (dp83640->clock->page != page) {
		broadcast_write(phydev->bus, PAGESEL, page);
		dp83640->clock->page = page;
	}
	if (broadcast)
		broadcast_write(phydev->bus, regnum, val);
	else
		phy_write(phydev, regnum, val);
}
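
/*
 * Clock load and step operations take four consecutive writes to the
 * PTP_TDR register (nanoseconds low/high, then seconds low/high),
 * followed by the command word written to PTP_CTL.
 */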
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
		     const struct timespec64 *ts, u16 cmd)
{
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0]  */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);   /* ns[31:16] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);    /* sec[31:16] */

	ext_write(bc, dev, PAGE4, PTP_CTL, cmd);

	return 0;
}
/* convert phy timestamps into driver timestamps */

static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
{
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	rxts->ns = p->ns_lo;
	rxts->ns |= (p->ns_hi & 0x3fff) << 16;
	rxts->ns += ((u64)sec) * 1000000000ULL;
	rxts->seqid = p->seqid;
	rxts->msgtype = (p->msgtype >> 12) & 0xf;
	rxts->hash = p->msgtype & 0x0fff;
	rxts->tmo = jiffies + 2;
}

static u64 phy2txts(struct phy_txts *p)
{
	u64 ns;
	u32 sec;

	sec = p->sec_lo;
	sec |= p->sec_hi << 16;

	ns = p->ns_lo;
	ns |= (p->ns_hi & 0x3fff) << 16;
	ns += ((u64)sec) * 1000000000ULL;

	return ns;
}
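
/*
 * Program one of the periodic output triggers: the trigger and GPIO pin
 * are selected through PTP_TRIG on page 5, then the start time and pulse
 * width are loaded via a sequence of PTP_TDR writes on page 4.
 */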
static int periodic_output(struct dp83640_clock *clock,
			   struct ptp_clock_request *clkreq, bool on,
			   int trigger)
{
	struct dp83640_private *dp83640 = clock->chosen;
	struct phy_device *phydev = dp83640->phydev;
	u32 sec, nsec, pwidth;
	u16 gpio, ptp_trig, val;

	if (on) {
		gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
					trigger);
		if (gpio < 1)
			return -EINVAL;
	} else {
		gpio = 0;
	}

	ptp_trig = TRIG_WR |
		(trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
		(gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
		TRIG_PER |
		TRIG_PULSE;

	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;

	if (!on) {
		val |= TRIG_DIS;
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
		ext_write(0, phydev, PAGE4, PTP_CTL, val);
		mutex_unlock(&clock->extreg_lock);
		return 0;
	}

	sec = clkreq->perout.start.sec;
	nsec = clkreq->perout.start.nsec;
	pwidth = clkreq->perout.period.sec * 1000000000UL;
	pwidth += clkreq->perout.period.nsec;
	pwidth /= 2;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val |= TRIG_LOAD;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
	ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
	/* Triggers 0 and 1 have programmable pulsewidth2 */
	if (trigger < 2) {
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff);
		ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);
	}

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, phydev, PAGE4, PTP_CTL, val);

	mutex_unlock(&clock->extreg_lock);
	return 0;
}
/* ptp clock methods */

static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	u64 rate;
	int neg_adj = 0;
	u16 hi, lo;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	rate = ppb;
	rate <<= 26;
	rate = div_u64(rate, 1953125);

	hi = (rate >> 16) & PTP_RATE_HI_MASK;
	if (neg_adj)
		hi |= PTP_RATE_DIR;

	lo = rate & 0xffff;

	mutex_lock(&clock->extreg_lock);

	ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
	ext_write(1, phydev, PAGE4, PTP_RATEL, lo);

	mutex_unlock(&clock->extreg_lock);

	return 0;
}
static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	struct timespec64 ts;
	int err;

	delta += ADJTIME_FIX;

	ts = ns_to_timespec64(delta);

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int val[4];

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);

	val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
	val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
	val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
	val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */

	mutex_unlock(&clock->extreg_lock);

	ts->tv_nsec = val[0] | (val[1] << 16);
	ts->tv_sec = val[2] | (val[3] << 16);

	return 0;
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	int err;

	mutex_lock(&clock->extreg_lock);

	err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);

	mutex_unlock(&clock->extreg_lock);

	return err;
}
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int index;
	u16 evnt, event_num, gpio_num;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		index = rq->extts.index;
		if (index >= N_EXT_TS)
			return -EINVAL;
		event_num = EXT_EVENT + index;
		evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
		if (on) {
			gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
						    PTP_PF_EXTTS, index);
			if (gpio_num < 1)
				return -EINVAL;
			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
			if (rq->extts.flags & PTP_FALLING_EDGE)
				evnt |= EVNT_FALL;
			else
				evnt |= EVNT_RISE;
		}
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
		mutex_unlock(&clock->extreg_lock);
		return 0;

	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index >= N_PER_OUT)
			return -EINVAL;
		return periodic_output(clock, rq, on, rq->perout.index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
			      enum ptp_pin_function func, unsigned int chan)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);

	if (clock->caps.pin_config[pin].func == PTP_PF_PHYSYNC &&
	    !list_empty(&clock->phylist))
		return 1;

	if (func == PTP_PF_PHYSYNC)
		return 1;

	return 0;
}
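
/*
 * The PHY reports timestamps in-band as "status frames": small Ethernet
 * frames inserted into the receive path, identified by the fixed
 * destination and source addresses below.
 */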
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };

static void enable_status_frames(struct phy_device *phydev, bool on)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;
	u16 cfg0 = 0, ver;

	if (on)
		cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;

	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);

	mutex_unlock(&clock->extreg_lock);

	if (!phydev->attached_dev) {
		pr_warn("expected to find an attached netdevice\n");
		return;
	}

	if (on) {
		if (dev_mc_add(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to add mc address\n");
	} else {
		if (dev_mc_del(phydev->attached_dev, status_frame_dst))
			pr_warn("failed to delete mc address\n");
	}
}
static bool is_status_frame(struct sk_buff *skb, int type)
{
	struct ethhdr *h = eth_hdr(skb);

	if (PTP_CLASS_V2_L2 == type &&
	    !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
		return true;
	else
		return false;
}
static int expired(struct rxts *rxts)
{
	return time_after(jiffies, rxts->tmo);
}
/* Caller must hold rx_lock. */
static void prune_rx_ts(struct dp83640_private *dp83640)
{
	struct list_head *this, *next;
	struct rxts *rxts;

	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (expired(rxts)) {
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
		}
	}
}
/* synchronize the phyters so they act as one clock */

static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
	int val;

	phy_write(phydev, PAGESEL, 0);
	val = phy_read(phydev, PHYCR2);
	if (on)
		val |= BC_WRITE;
	else
		val &= ~BC_WRITE;
	phy_write(phydev, PHYCR2, val);
	phy_write(phydev, PAGESEL, init_page);
}
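
/*
 * recalibrate() brings all phyters on the bus onto the chosen master's
 * time base: a single trigger pulse on the calibration GPIO is
 * timestamped by every PHY, and each slave clock is then stepped by its
 * measured offset from the master.
 */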
static void recalibrate(struct dp83640_clock *clock)
{
	s64 now, diff;
	struct phy_txts event_ts;
	struct timespec64 ts;
	struct list_head *this;
	struct dp83640_private *tmp;
	struct phy_device *master = clock->chosen->phydev;
	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;

	trigger = CAL_TRIGGER;
	cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
	if (cal_gpio < 1) {
		pr_err("PHY calibration pin not available - PHY is not calibrated.");
		return;
	}

	mutex_lock(&clock->extreg_lock);

	/*
	 * enable broadcast, disable status frames, enable ptp clock
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		enable_broadcast(tmp->phydev, clock->page, 1);
		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
		ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	}
	enable_broadcast(master, clock->page, 1);
	cfg0 = ext_read(master, PAGE5, PSF_CFG0);
	ext_write(0, master, PAGE5, PSF_CFG0, 0);
	ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);

	/*
	 * enable an event timestamp
	 */
	evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
	}
	ext_write(0, master, PAGE5, PTP_EVNT, evnt);

	/*
	 * configure a trigger
	 */
	ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
	ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
	ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
	ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_LOAD;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* disable trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_DIS;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/*
	 * read out and correct offsets
	 */
	val = ext_read(master, PAGE4, PTP_STS);
	pr_info("master PTP_STS 0x%04hx\n", val);
	val = ext_read(master, PAGE4, PTP_ESTS);
	pr_info("master PTP_ESTS 0x%04hx\n", val);
	event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
	now = phy2txts(&event_ts);

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
		pr_info("slave PTP_STS 0x%04hx\n", val);
		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
		pr_info("slave PTP_ESTS 0x%04hx\n", val);
		event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		diff = now - (s64) phy2txts(&event_ts);
		pr_info("slave offset %lld nanoseconds\n", diff);
		diff += ADJTIME_FIX;
		ts = ns_to_timespec64(diff);
		tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
	}

	/*
	 * restore status frames
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
	}
	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);

	mutex_unlock(&clock->extreg_lock);
}
/* time stamping methods */

static inline u16 exts_chan_to_edata(int ch)
{
	return 1 << ((ch + EXT_EVENT) * 2);
}
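
/*
 * Parse one external event timestamp message from a status frame and
 * deliver a PTP_CLOCK_EXTTS event for every channel flagged in the
 * (optional) extended status word.
 */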
static int decode_evnt(struct dp83640_private *dp83640,
		       void *data, int len, u16 ests)
{
	struct phy_txts *phy_txts;
	struct ptp_clock_event event;
	int i, parsed;
	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
	u16 ext_status = 0;

	/* calculate length of the event timestamp status message */
	if (ests & MULT_EVNT)
		parsed = (words + 2) * sizeof(u16);
	else
		parsed = (words + 1) * sizeof(u16);

	/* check if enough data is available */
	if (len < parsed)
		return len;

	if (ests & MULT_EVNT) {
		ext_status = *(u16 *) data;
		data += sizeof(ext_status);
	}

	phy_txts = data;

	switch (words) { /* fall through in every case */
	case 3:
		dp83640->edata.sec_hi = phy_txts->sec_hi;
	case 2:
		dp83640->edata.sec_lo = phy_txts->sec_lo;
	case 1:
		dp83640->edata.ns_hi = phy_txts->ns_hi;
	case 0:
		dp83640->edata.ns_lo = phy_txts->ns_lo;
	}

	if (!ext_status) {
		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
		ext_status = exts_chan_to_edata(i);
	}

	event.type = PTP_CLOCK_EXTTS;
	event.timestamp = phy2txts(&dp83640->edata);

	/* Compensate for input path and synchronization delays */
	event.timestamp -= 35;

	for (i = 0; i < N_EXT_TS; i++) {
		if (ext_status & exts_chan_to_edata(i)) {
			event.index = i;
			ptp_clock_event(dp83640->clock->ptp_clock, &event);
		}
	}

	return parsed;
}
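
/*
 * Match a queued receive skb against a receive timestamp by comparing
 * the PTP sequenceId and messageType extracted from the packet header.
 */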
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
	u16 *seqid;
	unsigned int offset = 0;
	u8 *msgtype, *data = skb_mac_header(skb);

	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
		return 0;

	if (unlikely(type & PTP_CLASS_V1))
		msgtype = data + offset + OFF_PTP_CONTROL;
	else
		msgtype = data + offset;

	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	return rxts->msgtype == (*msgtype & 0xf) &&
		rxts->seqid == ntohs(*seqid);
}
static void decode_rxts(struct dp83640_private *dp83640,
			struct phy_rxts *phy_rxts)
{
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dp83640->rx_lock, flags);

	prune_rx_ts(dp83640);

	if (list_empty(&dp83640->rxpool)) {
		pr_debug("rx timestamp pool is empty\n");
		goto out;
	}
	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
	list_del_init(&rxts->list);
	phy2rxts(phy_rxts, rxts);

	spin_lock(&dp83640->rx_queue.lock);
	skb_queue_walk(&dp83640->rx_queue, skb) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (match(skb, skb_info->ptp_type, rxts)) {
			__skb_unlink(skb, &dp83640->rx_queue);
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			netif_rx_ni(skb);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock(&dp83640->rx_queue.lock);

	if (!shhwtstamps)
		list_add_tail(&rxts->list, &dp83640->rxts);
out:
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static void decode_txts(struct dp83640_private *dp83640,
			struct phy_txts *phy_txts)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	u64 ns;

	/* We must already have the skb that triggered this. */

	skb = skb_dequeue(&dp83640->tx_queue);

	if (!skb) {
		pr_debug("have timestamp but tx_queue empty\n");
		return;
	}
	ns = phy2txts(phy_txts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_complete_tx_timestamp(skb, &shhwtstamps);
}
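
/*
 * Walk the payload of a status frame and hand each embedded message to
 * the receive, transmit or event timestamp decoder.
 */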
static void decode_status_frame(struct dp83640_private *dp83640,
				struct sk_buff *skb)
{
	struct phy_rxts *phy_rxts;
	struct phy_txts *phy_txts;
	u8 *ptr;
	int len, size;
	u16 ests, type;

	ptr = skb->data + 2;

	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {

		type = *(u16 *)ptr;
		ests = type & 0x0fff;
		type = type & 0xf000;
		len -= sizeof(type);
		ptr += sizeof(type);

		if (PSF_RX == type && len >= sizeof(*phy_rxts)) {

			phy_rxts = (struct phy_rxts *) ptr;
			decode_rxts(dp83640, phy_rxts);
			size = sizeof(*phy_rxts);

		} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {

			phy_txts = (struct phy_txts *) ptr;
			decode_txts(dp83640, phy_txts);
			size = sizeof(*phy_txts);

		} else if (PSF_EVNT == type) {

			size = decode_evnt(dp83640, ptr, len, ests);

		} else {
			size = 0;
		}
		ptr += size;
	}
}
static int is_sync(struct sk_buff *skb, int type)
{
	u8 *data = skb->data, *msgtype;
	unsigned int offset = 0;

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (type & PTP_CLASS_V1)
		offset += OFF_PTP_CONTROL;

	if (skb->len < offset + 1)
		return 0;

	msgtype = data + offset;

	return (*msgtype & 0xf) == 0;
}
static void dp83640_free_clocks(void)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;

	mutex_lock(&phyter_clocks_lock);

	list_for_each_safe(this, next, &phyter_clocks) {
		clock = list_entry(this, struct dp83640_clock, list);
		if (!list_empty(&clock->phylist)) {
			pr_warn("phy list non-empty while unloading\n");
			BUG();
		}
		list_del(&clock->list);
		mutex_destroy(&clock->extreg_lock);
		mutex_destroy(&clock->clock_lock);
		put_device(&clock->bus->dev);
		kfree(clock->caps.pin_config);
		kfree(clock);
	}

	mutex_unlock(&phyter_clocks_lock);
}
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
	INIT_LIST_HEAD(&clock->list);
	clock->bus = bus;
	mutex_init(&clock->extreg_lock);
	mutex_init(&clock->clock_lock);
	INIT_LIST_HEAD(&clock->phylist);
	clock->caps.owner = THIS_MODULE;
	sprintf(clock->caps.name, "dp83640 timer");
	clock->caps.max_adj	= 1953124;
	clock->caps.n_alarm	= 0;
	clock->caps.n_ext_ts	= N_EXT_TS;
	clock->caps.n_per_out	= N_PER_OUT;
	clock->caps.n_pins	= DP83640_N_PINS;
	clock->caps.pps		= 0;
	clock->caps.adjfreq	= ptp_dp83640_adjfreq;
	clock->caps.adjtime	= ptp_dp83640_adjtime;
	clock->caps.gettime64	= ptp_dp83640_gettime;
	clock->caps.settime64	= ptp_dp83640_settime;
	clock->caps.enable	= ptp_dp83640_enable;
	clock->caps.verify	= ptp_dp83640_verify;
	/*
	 * Convert the module param defaults into a dynamic pin configuration.
	 */
	dp83640_gpio_defaults(clock->caps.pin_config);
	/*
	 * Get a reference to this bus instance.
	 */
	get_device(&bus->dev);
}
static int choose_this_phy(struct dp83640_clock *clock,
			   struct phy_device *phydev)
{
	if (chosen_phy == -1 && !clock->chosen)
		return 1;

	if (chosen_phy == phydev->addr)
		return 1;

	return 0;
}
static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
{
	if (clock)
		mutex_lock(&clock->clock_lock);
	return clock;
}
/*
 * Look up and lock a clock by bus instance.
 * If there is no clock for this bus, then create it first.
 */
static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
{
	struct dp83640_clock *clock = NULL, *tmp;
	struct list_head *this;

	mutex_lock(&phyter_clocks_lock);

	list_for_each(this, &phyter_clocks) {
		tmp = list_entry(this, struct dp83640_clock, list);
		if (tmp->bus == bus) {
			clock = tmp;
			break;
		}
	}
	if (clock)
		goto out;

	clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
	if (!clock)
		goto out;

	clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
					 DP83640_N_PINS, GFP_KERNEL);
	if (!clock->caps.pin_config) {
		kfree(clock);
		clock = NULL;
		goto out;
	}
	dp83640_clock_init(clock, bus);
	list_add_tail(&phyter_clocks, &clock->list);
out:
	mutex_unlock(&phyter_clocks_lock);

	return dp83640_clock_get(clock);
}
static void dp83640_clock_put(struct dp83640_clock *clock)
{
	mutex_unlock(&clock->clock_lock);
}
static int dp83640_probe(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct dp83640_private *dp83640;
	int err = -ENOMEM, i;

	if (phydev->addr == BROADCAST_ADDR)
		return 0;

	clock = dp83640_clock_get_bus(phydev->bus);
	if (!clock)
		goto no_clock;

	dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
	if (!dp83640)
		goto no_memory;

	dp83640->phydev = phydev;
	INIT_WORK(&dp83640->ts_work, rx_timestamp_work);

	INIT_LIST_HEAD(&dp83640->rxts);
	INIT_LIST_HEAD(&dp83640->rxpool);
	for (i = 0; i < MAX_RXTS; i++)
		list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);

	phydev->priv = dp83640;

	spin_lock_init(&dp83640->rx_lock);
	skb_queue_head_init(&dp83640->rx_queue);
	skb_queue_head_init(&dp83640->tx_queue);

	dp83640->clock = clock;

	if (choose_this_phy(clock, phydev)) {
		clock->chosen = dp83640;
		clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
		if (IS_ERR(clock->ptp_clock)) {
			err = PTR_ERR(clock->ptp_clock);
			goto no_register;
		}
	} else
		list_add_tail(&dp83640->list, &clock->phylist);

	dp83640_clock_put(clock);
	return 0;

no_register:
	clock->chosen = NULL;
	kfree(dp83640);
no_memory:
	dp83640_clock_put(clock);
no_clock:
	return err;
}
static void dp83640_remove(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;
	struct dp83640_private *tmp, *dp83640 = phydev->priv;

	if (phydev->addr == BROADCAST_ADDR)
		return;

	enable_status_frames(phydev, false);
	cancel_work_sync(&dp83640->ts_work);

	skb_queue_purge(&dp83640->rx_queue);
	skb_queue_purge(&dp83640->tx_queue);

	clock = dp83640_clock_get(dp83640->clock);

	if (dp83640 == clock->chosen) {
		ptp_clock_unregister(clock->ptp_clock);
		clock->chosen = NULL;
	} else {
		list_for_each_safe(this, next, &clock->phylist) {
			tmp = list_entry(this, struct dp83640_private, list);
			if (tmp == dp83640) {
				list_del_init(&tmp->list);
				break;
			}
		}
	}

	dp83640_clock_put(clock);
	kfree(dp83640);
}
static int dp83640_config_init(struct phy_device *phydev)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_clock *clock = dp83640->clock;

	if (clock->chosen && !list_empty(&clock->phylist))
		recalibrate(clock);
	else {
		mutex_lock(&clock->extreg_lock);
		enable_broadcast(phydev, clock->page, 1);
		mutex_unlock(&clock->extreg_lock);
	}

	enable_status_frames(phydev, true);

	mutex_lock(&clock->extreg_lock);
	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	mutex_unlock(&clock->extreg_lock);

	return 0;
}
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_DP83640_MISR);

	if (err < 0)
		return err;

	return 0;
}
static int dp83640_config_intr(struct phy_device *phydev)
{
	int micr;
	int misr;
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr |=
			(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		err = phy_write(phydev, MII_DP83640_MISR, misr);
		if (err < 0)
			return err;

		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr |=
			(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		return phy_write(phydev, MII_DP83640_MICR, micr);
	} else {
		micr = phy_read(phydev, MII_DP83640_MICR);
		if (micr < 0)
			return micr;
		micr &=
			~(MII_DP83640_MICR_OE |
			MII_DP83640_MICR_IE);
		err = phy_write(phydev, MII_DP83640_MICR, micr);
		if (err < 0)
			return err;

		misr = phy_read(phydev, MII_DP83640_MISR);
		if (misr < 0)
			return misr;
		misr &=
			~(MII_DP83640_MISR_ANC_INT_EN |
			MII_DP83640_MISR_DUP_INT_EN |
			MII_DP83640_MISR_SPD_INT_EN |
			MII_DP83640_MISR_LINK_INT_EN);
		return phy_write(phydev, MII_DP83640_MISR, misr);
	}
}
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct hwtstamp_config cfg;
	u16 txcfg0, rxcfg0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
		return -ERANGE;

	dp83640->hwts_tx_en = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		dp83640->hwts_rx_en = 0;
		dp83640->layer = 0;
		dp83640->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = LAYER4;
		dp83640->version = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = LAYER4;
		dp83640->version = 2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = LAYER2;
		dp83640->version = 2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		dp83640->hwts_rx_en = 1;
		dp83640->layer = LAYER4 | LAYER2;
		dp83640->version = 2;
		break;
	default:
		return -ERANGE;
	}

	txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
	rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;

	if (dp83640->layer & LAYER2) {
		txcfg0 |= TX_L2_EN;
		rxcfg0 |= RX_L2_EN;
	}
	if (dp83640->layer & LAYER4) {
		txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
		rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
	}

	if (dp83640->hwts_tx_en)
		txcfg0 |= TX_TS_EN;

	if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
		txcfg0 |= SYNC_1STEP | CHK_1STEP;

	if (dp83640->hwts_rx_en)
		rxcfg0 |= RX_TS_EN;

	mutex_lock(&dp83640->clock->extreg_lock);

	ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
	ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);

	mutex_unlock(&dp83640->clock->extreg_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
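
/*
 * Deferred work: release queued receive packets whose timestamp did not
 * arrive before their timeout expired.
 */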
static void rx_timestamp_work(struct work_struct *work)
{
	struct dp83640_private *dp83640 =
		container_of(work, struct dp83640_private, ts_work);
	struct sk_buff *skb;

	/* Deliver expired packets. */
	while ((skb = skb_dequeue(&dp83640->rx_queue))) {
		struct dp83640_skb_info *skb_info;

		skb_info = (struct dp83640_skb_info *)skb->cb;
		if (!time_after(jiffies, skb_info->tmo)) {
			skb_queue_head(&dp83640->rx_queue, skb);
			break;
		}

		netif_rx_ni(skb);
	}

	if (!skb_queue_empty(&dp83640->rx_queue))
		schedule_work(&dp83640->ts_work);
}
static bool dp83640_rxtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;
	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
	struct list_head *this, *next;
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	unsigned long flags;

	if (is_status_frame(skb, type)) {
		decode_status_frame(dp83640, skb);
		kfree_skb(skb);
		return true;
	}

	if (!dp83640->hwts_rx_en)
		return false;

	spin_lock_irqsave(&dp83640->rx_lock, flags);
	list_for_each_safe(this, next, &dp83640->rxts) {
		rxts = list_entry(this, struct rxts, list);
		if (match(skb, type, rxts)) {
			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
			netif_rx_ni(skb);
			list_del_init(&rxts->list);
			list_add(&rxts->list, &dp83640->rxpool);
			break;
		}
	}
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);

	if (!shhwtstamps) {
		skb_info->ptp_type = type;
		skb_info->tmo = jiffies + 2;
		skb_queue_tail(&dp83640->rx_queue, skb);
		schedule_work(&dp83640->ts_work);
	}

	return true;
}
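
/*
 * Transmit path: in one-step mode, sync messages are timestamped by the
 * hardware on the fly and need no follow-up; all other timestamped
 * packets are queued until the PHY reports the transmit timestamp in a
 * status frame.
 */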
static void dp83640_txtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct dp83640_private *dp83640 = phydev->priv;

	switch (dp83640->hwts_tx_en) {

	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		/* fall through */
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&dp83640->tx_queue, skb);
		break;

	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
{
	struct dp83640_private *dp83640 = dev->priv;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
	return 0;
}
static struct phy_driver dp83640_driver = {
	.phy_id		= DP83640_PHY_ID,
	.phy_id_mask	= 0xfffffff0,
	.name		= "NatSemi DP83640",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.probe		= dp83640_probe,
	.remove		= dp83640_remove,
	.config_init	= dp83640_config_init,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= dp83640_ack_interrupt,
	.config_intr	= dp83640_config_intr,
	.ts_info	= dp83640_ts_info,
	.hwtstamp	= dp83640_hwtstamp,
	.rxtstamp	= dp83640_rxtstamp,
	.txtstamp	= dp83640_txtstamp,
	.driver		= {.owner = THIS_MODULE,}
};
static int __init dp83640_init(void)
{
	return phy_driver_register(&dp83640_driver);
}

static void __exit dp83640_exit(void)
{
	dp83640_free_clocks();
	phy_driver_unregister(&dp83640_driver);
}

MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");

module_init(dp83640_init);
module_exit(dp83640_exit);
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
	{ DP83640_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, dp83640_tbl);