/*
 * Driver for the National Semiconductor DP83640 PHYTER
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "dp83640_reg.h"

#define DP83640_PHY_ID 0x20005ce1

#define PSF_EVNT 0x4000

#define MII_DP83640_MICR 0x11
#define MII_DP83640_MISR 0x12

#define MII_DP83640_MICR_OE 0x1
#define MII_DP83640_MICR_IE 0x2

#define MII_DP83640_MISR_RHF_INT_EN 0x01
#define MII_DP83640_MISR_FHF_INT_EN 0x02
#define MII_DP83640_MISR_ANC_INT_EN 0x04
#define MII_DP83640_MISR_DUP_INT_EN 0x08
#define MII_DP83640_MISR_SPD_INT_EN 0x10
#define MII_DP83640_MISR_LINK_INT_EN 0x20
#define MII_DP83640_MISR_ED_INT_EN 0x40
#define MII_DP83640_MISR_LQ_INT_EN 0x80

/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16
#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG 0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG PSF_ENDIAN
#endif

#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
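/*
 * Note: the PTP classification passed into dp83640_rxtstamp() is stashed
 * in skb->cb via SKB_PTP_TYPE() so that the deferred rx_timestamp_work()
 * can reuse it when matching packets against received timestamps.
 */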
struct phy_rxts {
        u16 ns_lo;   /* ns[15:0] */
        u16 ns_hi;   /* overflow[1:0], ns[29:16] */
        u16 sec_lo;  /* sec[15:0] */
        u16 sec_hi;  /* sec[31:16] */
        u16 seqid;   /* sequenceId[15:0] */
        u16 msgtype; /* messageType[3:0], hash[11:0] */
};

struct phy_txts {
        u16 ns_lo;   /* ns[15:0] */
        u16 ns_hi;   /* overflow[1:0], ns[29:16] */
        u16 sec_lo;  /* sec[15:0] */
        u16 sec_hi;  /* sec[31:16] */
};

struct rxts {
        struct list_head list;
        unsigned long tmo;
        u64 ns;
        u16 seqid;
        u8 msgtype;
        u16 hash;
};
struct dp83640_clock;
struct dp83640_private {
        struct list_head list;
        struct dp83640_clock *clock;
        struct phy_device *phydev;
        struct work_struct ts_work;
        int hwts_tx_en;
        int hwts_rx_en;
        int layer;
        int version;
        /* remember state of cfg0 during calibration */
        int cfg0;
        /* remember the last event time stamp */
        struct phy_txts edata;
        /* list of rx timestamps */
        struct list_head rxts;
        struct list_head rxpool;
        struct rxts rx_pool_data[MAX_RXTS];
        /* protects above three fields from concurrent access */
        spinlock_t rx_lock;
        /* queues of incoming and outgoing packets */
        struct sk_buff_head rx_queue;
        struct sk_buff_head tx_queue;
};
struct dp83640_clock {
        /* keeps the instance in the 'phyter_clocks' list */
        struct list_head list;
        /* we create one clock instance per MII bus */
        struct mii_bus *bus;
        /* protects extended registers from concurrent access */
        struct mutex extreg_lock;
        /* remembers which page was last selected */
        int page;
        /* our advertised capabilities */
        struct ptp_clock_info caps;
        /* protects the three fields below from concurrent access */
        struct mutex clock_lock;
        /* the one phyter from which we shall read */
        struct dp83640_private *chosen;
        /* list of the other attached phyters, not chosen */
        struct list_head phylist;
        /* reference to our PTP hardware clock */
        struct ptp_clock *ptp_clock;
};
static int chosen_phy = -1;
static ushort gpio_tab[GPIO_TABLE_SIZE] = {
        1, 2, 3, 4, 8, 9, 10, 11
};

module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);

MODULE_PARM_DESC(chosen_phy, \
        "The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(gpio_tab, \
        "Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");

/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);
static void rx_timestamp_work(struct work_struct *work);
/* extended register access functions */

#define BROADCAST_ADDR 31

static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
{
        return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
}
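/*
 * Note: writes to MDIO address 31 reach every phyter on the bus that has
 * broadcast writes enabled in PHYCR2 (see enable_broadcast() below),
 * which is presumably how the driver keeps multiple phyters programmed
 * in lock step.
 */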
/* Caller must hold extreg_lock. */
static int ext_read(struct phy_device *phydev, int page, u32 regnum)
{
        struct dp83640_private *dp83640 = phydev->priv;
        int val;

        if (dp83640->clock->page != page) {
                broadcast_write(phydev->bus, PAGESEL, page);
                dp83640->clock->page = page;
        }
        val = phy_read(phydev, regnum);

        return val;
}
/* Caller must hold extreg_lock. */
static void ext_write(int broadcast, struct phy_device *phydev,
                      int page, u32 regnum, u16 val)
{
        struct dp83640_private *dp83640 = phydev->priv;

        if (dp83640->clock->page != page) {
                broadcast_write(phydev->bus, PAGESEL, page);
                dp83640->clock->page = page;
        }
        if (broadcast)
                broadcast_write(phydev->bus, regnum, val);
        else
                phy_write(phydev, regnum, val);
}
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
                     const struct timespec *ts, u16 cmd)
{
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff); /* ns[15:0]  */
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);    /* ns[31:16] */
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff);  /* sec[15:0] */
        ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);     /* sec[31:16] */

        ext_write(bc, dev, PAGE4, PTP_CTL, cmd);

        return 0;
}
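/*
 * Note: PTP_TDR appears to act as a 16-bit data port: the time value is
 * loaded low word first (nanoseconds, then seconds), and the following
 * write to PTP_CTL issues the actual load/step command.
 */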
/* convert phy timestamps into driver timestamps */

static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
{
        u32 sec;

        sec = p->sec_lo;
        sec |= p->sec_hi << 16;

        rxts->ns = p->ns_lo;
        rxts->ns |= (p->ns_hi & 0x3fff) << 16;
        rxts->ns += ((u64)sec) * 1000000000ULL;
        rxts->seqid = p->seqid;
        rxts->msgtype = (p->msgtype >> 12) & 0xf;
        rxts->hash = p->msgtype & 0x0fff;
        rxts->tmo = jiffies + 2;
}

static u64 phy2txts(struct phy_txts *p)
{
        u64 ns;
        u32 sec;

        sec = p->sec_lo;
        sec |= p->sec_hi << 16;

        ns = p->ns_lo;
        ns |= (p->ns_hi & 0x3fff) << 16;
        ns += ((u64)sec) * 1000000000ULL;

        return ns;
}
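/*
 * Note: only bits [13:0] of ns_hi carry nanoseconds; the top two bits
 * are the overflow field (see struct phy_rxts), hence the 0x3fff mask
 * used in both conversions above.
 */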
static void periodic_output(struct dp83640_clock *clock,
                            struct ptp_clock_request *clkreq, bool on)
{
        struct dp83640_private *dp83640 = clock->chosen;
        struct phy_device *phydev = dp83640->phydev;
        u32 sec, nsec, period;
        u16 gpio, ptp_trig, trigger, val;

        gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
        trigger = PER_TRIGGER;

        ptp_trig = TRIG_WR |
                (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
                (gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
                TRIG_PER |
                TRIG_PULSE;

        val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;

        if (!on) {
                val |= TRIG_DIS;
                mutex_lock(&clock->extreg_lock);
                ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
                ext_write(0, phydev, PAGE4, PTP_CTL, val);
                mutex_unlock(&clock->extreg_lock);
                return;
        }

        sec = clkreq->perout.start.sec;
        nsec = clkreq->perout.start.nsec;
        period = clkreq->perout.period.sec * 1000000000UL;
        period += clkreq->perout.period.nsec;

        mutex_lock(&clock->extreg_lock);

        ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);

        /* load trigger */
        val |= TRIG_LOAD;
        ext_write(0, phydev, PAGE4, PTP_CTL, val);
        ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
        ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
        ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16);    /* ns[31:16] */

        /* enable trigger */
        val &= ~TRIG_LOAD;
        val |= TRIG_EN;
        ext_write(0, phydev, PAGE4, PTP_CTL, val);

        mutex_unlock(&clock->extreg_lock);
}
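/*
 * Note: the six PTP_TDR writes above load the trigger's start time
 * (nanoseconds, then seconds) followed by the period, 16 bits at a time,
 * before the trigger is armed through PTP_CTL.
 */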
/* ptp clock methods */

static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
        u64 rate;
        int neg_adj = 0;
        u16 hi, lo;

        if (ppb < 0) {
                neg_adj = 1;
                ppb = -ppb;
        }
        rate = ppb;
        rate <<= 26;
        rate = div_u64(rate, 1953125);

        hi = (rate >> 16) & PTP_RATE_HI_MASK;
        if (neg_adj)
                hi |= PTP_RATE_DIR;

        lo = rate & 0xffff;

        mutex_lock(&clock->extreg_lock);

        ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
        ext_write(1, phydev, PAGE4, PTP_RATEL, lo);

        mutex_unlock(&clock->extreg_lock);

        return 0;
}
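/*
 * Note on the conversion above: 1953125 = 10^9 / 512, so the value
 * programmed into PTP_RATEH/PTP_RATEL works out to ppb * 2^35 / 10^9.
 * That is consistent with a rate register counting fractional
 * nanoseconds (in units of 2^-32 ns) accumulated every 8 ns period of
 * the 125 MHz PTP clock, though the exact hardware units are an
 * assumption here, not taken from the datasheet.
 */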
static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
        struct timespec ts;
        int err;

        delta += ADJTIME_FIX;

        ts = ns_to_timespec(delta);

        mutex_lock(&clock->extreg_lock);

        err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);

        mutex_unlock(&clock->extreg_lock);

        return err;
}
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
        unsigned int val[4];

        mutex_lock(&clock->extreg_lock);

        ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);

        val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
        val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
        val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
        val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */

        mutex_unlock(&clock->extreg_lock);

        ts->tv_nsec = val[0] | (val[1] << 16);
        ts->tv_sec = val[2] | (val[3] << 16);

        return 0;
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
                               const struct timespec *ts)
{
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
        int err;

        mutex_lock(&clock->extreg_lock);

        err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);

        mutex_unlock(&clock->extreg_lock);

        return err;
}
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                              struct ptp_clock_request *rq, int on)
{
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
        int index;
        u16 evnt, event_num, gpio_num;

        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                index = rq->extts.index;
                if (index < 0 || index >= N_EXT_TS)
                        return -EINVAL;
                event_num = EXT_EVENT + index;
                evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
                if (on) {
                        gpio_num = gpio_tab[EXTTS0_GPIO + index];
                        evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
                        if (rq->extts.flags & PTP_FALLING_EDGE)
                                evnt |= EVNT_FALL;
                        else
                                evnt |= EVNT_RISE;
                }
                ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
                return 0;

        case PTP_CLK_REQ_PEROUT:
                if (rq->perout.index != 0)
                        return -EINVAL;
                periodic_output(clock, rq, on);
                return 0;

        default:
                break;
        }

        return -EOPNOTSUPP;
}
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };

static void enable_status_frames(struct phy_device *phydev, bool on)
{
        u16 cfg0 = 0, ver;

        if (on)
                cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;

        ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;

        ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
        ext_write(0, phydev, PAGE6, PSF_CFG1, ver);

        if (!phydev->attached_dev) {
                pr_warn("expected to find an attached netdevice\n");
                return;
        }

        if (on) {
                if (dev_mc_add(phydev->attached_dev, status_frame_dst))
                        pr_warn("failed to add mc address\n");
        } else {
                if (dev_mc_del(phydev->attached_dev, status_frame_dst))
                        pr_warn("failed to delete mc address\n");
        }
}
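/*
 * Note: status_frame_dst is the standard IEEE 1588 multicast MAC address
 * (01:1B:19:00:00:00), which is why the attached netdevice is asked to
 * accept it via dev_mc_add() above.  status_frame_src appears to be the
 * source address the phyter places in its own status frames; it is what
 * is_status_frame() keys on.
 */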
static bool is_status_frame(struct sk_buff *skb, int type)
{
        struct ethhdr *h = eth_hdr(skb);

        if (PTP_CLASS_V2_L2 == type &&
            !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
                return true;
        else
                return false;
}

static int expired(struct rxts *rxts)
{
        return time_after(jiffies, rxts->tmo);
}
/* Caller must hold rx_lock. */
static void prune_rx_ts(struct dp83640_private *dp83640)
{
        struct list_head *this, *next;
        struct rxts *rxts;

        list_for_each_safe(this, next, &dp83640->rxts) {
                rxts = list_entry(this, struct rxts, list);
                if (expired(rxts)) {
                        list_del_init(&rxts->list);
                        list_add(&rxts->list, &dp83640->rxpool);
                }
        }
}
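/*
 * Note: a received timestamp that is never matched to a packet expires
 * two jiffies after it arrived (rxts->tmo, set in phy2rxts()), and
 * prune_rx_ts() recycles it back into the rxpool.
 */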
/* synchronize the phyters so they act as one clock */

static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
        int val;

        phy_write(phydev, PAGESEL, 0);
        val = phy_read(phydev, PHYCR2);
        if (on)
                val |= BC_WRITE;
        else
                val &= ~BC_WRITE;
        phy_write(phydev, PHYCR2, val);
        phy_write(phydev, PAGESEL, init_page);
}
static void recalibrate(struct dp83640_clock *clock)
{
        s64 now, diff;
        struct phy_txts event_ts;
        struct timespec ts;
        struct list_head *this;
        struct dp83640_private *tmp;
        struct phy_device *master = clock->chosen->phydev;
        u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;

        trigger = CAL_TRIGGER;
        cal_gpio = gpio_tab[CALIBRATE_GPIO];

        mutex_lock(&clock->extreg_lock);

        /*
         * enable broadcast, disable status frames, enable ptp clock
         */
        list_for_each(this, &clock->phylist) {
                tmp = list_entry(this, struct dp83640_private, list);
                enable_broadcast(tmp->phydev, clock->page, 1);
                tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
                ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
                ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
        }
        enable_broadcast(master, clock->page, 1);
        cfg0 = ext_read(master, PAGE5, PSF_CFG0);
        ext_write(0, master, PAGE5, PSF_CFG0, 0);
        ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);

        /*
         * enable an event timestamp
         */
        evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
        evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
        evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;

        list_for_each(this, &clock->phylist) {
                tmp = list_entry(this, struct dp83640_private, list);
                ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
        }
        ext_write(0, master, PAGE5, PTP_EVNT, evnt);

        /*
         * configure a trigger
         */
        ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
        ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
        ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
        ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);

        /* load trigger */
        val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
        val |= TRIG_LOAD;
        ext_write(0, master, PAGE4, PTP_CTL, val);

        /* enable trigger */
        val &= ~TRIG_LOAD;
        val |= TRIG_EN;
        ext_write(0, master, PAGE4, PTP_CTL, val);

        /* disable trigger */
        val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
        val |= TRIG_DIS;
        ext_write(0, master, PAGE4, PTP_CTL, val);

        /*
         * read out and correct offsets
         */
        val = ext_read(master, PAGE4, PTP_STS);
        pr_info("master PTP_STS 0x%04hx\n", val);
        val = ext_read(master, PAGE4, PTP_ESTS);
        pr_info("master PTP_ESTS 0x%04hx\n", val);
        event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
        event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
        event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
        event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
        now = phy2txts(&event_ts);

        list_for_each(this, &clock->phylist) {
                tmp = list_entry(this, struct dp83640_private, list);
                val = ext_read(tmp->phydev, PAGE4, PTP_STS);
                pr_info("slave PTP_STS 0x%04hx\n", val);
                val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
                pr_info("slave PTP_ESTS 0x%04hx\n", val);
                event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                diff = now - (s64) phy2txts(&event_ts);
                pr_info("slave offset %lld nanoseconds\n", diff);

                ts = ns_to_timespec(diff);
                tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
        }

        /*
         * restore status frames
         */
        list_for_each(this, &clock->phylist) {
                tmp = list_entry(this, struct dp83640_private, list);
                ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
        }
        ext_write(0, master, PAGE5, PSF_CFG0, cfg0);

        mutex_unlock(&clock->extreg_lock);
}
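/*
 * Note: this calibration assumes (a hardware requirement external to the
 * driver) that the CALIBRATE_GPIO pin is wired to every phyter on the
 * bus: the master fires a single trigger pulse, each phyter captures it
 * as an event timestamp, and the measured difference is then stepped out
 * of every slave clock.
 */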
/* time stamping methods */

static inline u16 exts_chan_to_edata(int ch)
{
        return 1 << ((ch + EXT_EVENT) * 2);
}
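/*
 * Note: the extended event status words appear to carry two bits per
 * external timestamp channel, hence the (ch + EXT_EVENT) * 2 shift
 * above; decode_evnt() uses the same mapping to fan a multi-event
 * status out into individual PTP_CLOCK_EXTTS events.
 */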
static int decode_evnt(struct dp83640_private *dp83640,
                       void *data, u16 ests)
{
        struct phy_txts *phy_txts;
        struct ptp_clock_event event;
        int i, parsed;
        int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
        u16 ext_status = 0;

        if (ests & MULT_EVNT) {
                ext_status = *(u16 *) data;
                data += sizeof(ext_status);
        }

        phy_txts = data;

        switch (words) { /* fall through in every case */
        case 3:
                dp83640->edata.sec_hi = phy_txts->sec_hi;
        case 2:
                dp83640->edata.sec_lo = phy_txts->sec_lo;
        case 1:
                dp83640->edata.ns_hi = phy_txts->ns_hi;
        case 0:
                dp83640->edata.ns_lo = phy_txts->ns_lo;
        }

        if (ext_status) {
                parsed = words + 2;
        } else {
                parsed = words + 1;
                i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
                ext_status = exts_chan_to_edata(i);
        }

        event.type = PTP_CLOCK_EXTTS;
        event.timestamp = phy2txts(&dp83640->edata);

        for (i = 0; i < N_EXT_TS; i++) {
                if (ext_status & exts_chan_to_edata(i)) {
                        event.index = i;
                        ptp_clock_event(dp83640->clock->ptp_clock, &event);
                }
        }

        return parsed * sizeof(u16);
}
static void decode_rxts(struct dp83640_private *dp83640,
                        struct phy_rxts *phy_rxts)
{
        struct rxts *rxts;
        unsigned long flags;

        spin_lock_irqsave(&dp83640->rx_lock, flags);

        prune_rx_ts(dp83640);

        if (list_empty(&dp83640->rxpool)) {
                pr_debug("rx timestamp pool is empty\n");
                goto out;
        }
        rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
        list_del_init(&rxts->list);
        phy2rxts(phy_rxts, rxts);
        list_add_tail(&rxts->list, &dp83640->rxts);
out:
        spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static void decode_txts(struct dp83640_private *dp83640,
                        struct phy_txts *phy_txts)
{
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        u64 ns;

        /* We must already have the skb that triggered this. */

        skb = skb_dequeue(&dp83640->tx_queue);

        if (!skb) {
                pr_debug("have timestamp but tx_queue empty\n");
                return;
        }
        ns = phy2txts(phy_txts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_complete_tx_timestamp(skb, &shhwtstamps);
}
static void decode_status_frame(struct dp83640_private *dp83640,
                                struct sk_buff *skb)
{
        struct phy_rxts *phy_rxts;
        struct phy_txts *phy_txts;
        u8 *ptr;
        int len, size;
        u16 ests, type;

        ptr = skb->data + 2;

        for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {

                type = *(u16 *)ptr;
                ests = type & 0x0fff;
                type = type & 0xf000;
                len -= sizeof(type);
                ptr += sizeof(type);

                if (PSF_RX == type && len >= sizeof(*phy_rxts)) {

                        phy_rxts = (struct phy_rxts *) ptr;
                        decode_rxts(dp83640, phy_rxts);
                        size = sizeof(*phy_rxts);

                } else if (PSF_TX == type && len >= sizeof(*phy_txts)) {

                        phy_txts = (struct phy_txts *) ptr;
                        decode_txts(dp83640, phy_txts);
                        size = sizeof(*phy_txts);

                } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {

                        size = decode_evnt(dp83640, ptr, ests);

                } else {
                        size = 0;
                }
                ptr += size;
        }
}
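/*
 * Note: each record in a status frame starts with a 16-bit header whose
 * top nibble selects the record type (PSF_RX, PSF_TX or PSF_EVNT) and
 * whose low twelve bits carry the event status consumed by decode_evnt().
 */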
static int is_sync(struct sk_buff *skb, int type)
{
        u8 *data = skb->data, *msgtype;
        unsigned int offset = 0;

        switch (type) {
        case PTP_CLASS_V1_IPV4:
        case PTP_CLASS_V2_IPV4:
                offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
        case PTP_CLASS_V1_IPV6:
        case PTP_CLASS_V2_IPV6:
                offset = OFF_PTP6;
                break;
        case PTP_CLASS_V2_L2:
                offset = ETH_HLEN;
                break;
        case PTP_CLASS_V2_VLAN:
                offset = ETH_HLEN + VLAN_HLEN;
                break;
        default:
                return 0;
        }

        if (type & PTP_CLASS_V1)
                offset += OFF_PTP_CONTROL;

        if (skb->len < offset + 1)
                return 0;

        msgtype = data + offset;

        return (*msgtype & 0xf) == 0;
}
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
        u16 *seqid;
        unsigned int offset;
        u8 *msgtype, *data = skb_mac_header(skb);

        /* check sequenceID, messageType, 12 bit hash of offset 20-29 */

        switch (type) {
        case PTP_CLASS_V1_IPV4:
        case PTP_CLASS_V2_IPV4:
                offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
        case PTP_CLASS_V1_IPV6:
        case PTP_CLASS_V2_IPV6:
                offset = OFF_PTP6;
                break;
        case PTP_CLASS_V2_L2:
                offset = ETH_HLEN;
                break;
        case PTP_CLASS_V2_VLAN:
                offset = ETH_HLEN + VLAN_HLEN;
                break;
        default:
                return 0;
        }

        if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
                return 0;

        if (unlikely(type & PTP_CLASS_V1))
                msgtype = data + offset + OFF_PTP_CONTROL;
        else
                msgtype = data + offset;

        seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

        return rxts->msgtype == (*msgtype & 0xf) &&
                rxts->seqid == ntohs(*seqid);
}
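/*
 * Note: despite the comment above, only messageType and sequenceId are
 * compared here; the 12-bit hash that phy2rxts() stores in rxts->hash is
 * not checked.
 */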
static void dp83640_free_clocks(void)
{
        struct dp83640_clock *clock;
        struct list_head *this, *next;

        mutex_lock(&phyter_clocks_lock);

        list_for_each_safe(this, next, &phyter_clocks) {
                clock = list_entry(this, struct dp83640_clock, list);
                if (!list_empty(&clock->phylist)) {
                        pr_warn("phy list non-empty while unloading\n");
                        BUG();
                }
                list_del(&clock->list);
                mutex_destroy(&clock->extreg_lock);
                mutex_destroy(&clock->clock_lock);
                put_device(&clock->bus->dev);
                kfree(clock);
        }

        mutex_unlock(&phyter_clocks_lock);
}
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
        INIT_LIST_HEAD(&clock->list);
        clock->bus = bus;
        mutex_init(&clock->extreg_lock);
        mutex_init(&clock->clock_lock);
        INIT_LIST_HEAD(&clock->phylist);
        clock->caps.owner = THIS_MODULE;
        sprintf(clock->caps.name, "dp83640 timer");
        clock->caps.max_adj = 1953124;
        clock->caps.n_alarm = 0;
        clock->caps.n_ext_ts = N_EXT_TS;
        clock->caps.n_per_out = 1;
        clock->caps.pps = 0;
        clock->caps.adjfreq = ptp_dp83640_adjfreq;
        clock->caps.adjtime = ptp_dp83640_adjtime;
        clock->caps.gettime = ptp_dp83640_gettime;
        clock->caps.settime = ptp_dp83640_settime;
        clock->caps.enable = ptp_dp83640_enable;

        /*
         * Get a reference to this bus instance.
         */
        get_device(&bus->dev);
}
static int choose_this_phy(struct dp83640_clock *clock,
                           struct phy_device *phydev)
{
        if (chosen_phy == -1 && !clock->chosen)
                return 1;

        if (chosen_phy == phydev->addr)
                return 1;

        return 0;
}
*dp83640_clock_get(struct dp83640_clock
*clock
)
924 mutex_lock(&clock
->clock_lock
);
/*
 * Look up and lock a clock by bus instance.
 * If there is no clock for this bus, then create it first.
 */
static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
{
        struct dp83640_clock *clock = NULL, *tmp;
        struct list_head *this;

        mutex_lock(&phyter_clocks_lock);

        list_for_each(this, &phyter_clocks) {
                tmp = list_entry(this, struct dp83640_clock, list);
                if (tmp->bus == bus) {
                        clock = tmp;
                        break;
                }
        }
        if (clock)
                goto out;

        clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
        if (!clock)
                goto out;

        dp83640_clock_init(clock, bus);
        list_add_tail(&clock->list, &phyter_clocks);
out:
        mutex_unlock(&phyter_clocks_lock);

        return dp83640_clock_get(clock);
}
static void dp83640_clock_put(struct dp83640_clock *clock)
{
        mutex_unlock(&clock->clock_lock);
}
static int dp83640_probe(struct phy_device *phydev)
{
        struct dp83640_clock *clock;
        struct dp83640_private *dp83640;
        int err = -ENOMEM, i;

        if (phydev->addr == BROADCAST_ADDR)
                return 0;

        clock = dp83640_clock_get_bus(phydev->bus);
        if (!clock)
                goto no_clock;

        dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
        if (!dp83640)
                goto no_memory;

        dp83640->phydev = phydev;
        INIT_WORK(&dp83640->ts_work, rx_timestamp_work);

        INIT_LIST_HEAD(&dp83640->rxts);
        INIT_LIST_HEAD(&dp83640->rxpool);
        for (i = 0; i < MAX_RXTS; i++)
                list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);

        phydev->priv = dp83640;

        spin_lock_init(&dp83640->rx_lock);
        skb_queue_head_init(&dp83640->rx_queue);
        skb_queue_head_init(&dp83640->tx_queue);

        dp83640->clock = clock;

        if (choose_this_phy(clock, phydev)) {
                clock->chosen = dp83640;
                clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
                if (IS_ERR(clock->ptp_clock)) {
                        err = PTR_ERR(clock->ptp_clock);
                        goto no_register;
                }
        } else
                list_add_tail(&dp83640->list, &clock->phylist);

        if (clock->chosen && !list_empty(&clock->phylist))
                recalibrate(clock);
        else
                enable_broadcast(dp83640->phydev, clock->page, 1);

        dp83640_clock_put(clock);
        return 0;

no_register:
        clock->chosen = NULL;
        kfree(dp83640);
no_memory:
        dp83640_clock_put(clock);
no_clock:
        return err;
}
static void dp83640_remove(struct phy_device *phydev)
{
        struct dp83640_clock *clock;
        struct list_head *this, *next;
        struct dp83640_private *tmp, *dp83640 = phydev->priv;
        struct sk_buff *skb;

        if (phydev->addr == BROADCAST_ADDR)
                return;

        enable_status_frames(phydev, false);
        cancel_work_sync(&dp83640->ts_work);

        while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
                kfree_skb(skb);

        while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
                skb_complete_tx_timestamp(skb, NULL);

        clock = dp83640_clock_get(dp83640->clock);

        if (dp83640 == clock->chosen) {
                ptp_clock_unregister(clock->ptp_clock);
                clock->chosen = NULL;
        } else {
                list_for_each_safe(this, next, &clock->phylist) {
                        tmp = list_entry(this, struct dp83640_private, list);
                        if (tmp == dp83640) {
                                list_del_init(&tmp->list);
                                break;
                        }
                }
        }

        dp83640_clock_put(clock);
        kfree(dp83640);
}
static int dp83640_config_init(struct phy_device *phydev)
{
        enable_status_frames(phydev, true);
        ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
        return 0;
}
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
        int err = phy_read(phydev, MII_DP83640_MISR);

        if (err < 0)
                return err;

        return 0;
}
static int dp83640_config_intr(struct phy_device *phydev)
{
        int micr;
        int misr;
        int err;

        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                misr = phy_read(phydev, MII_DP83640_MISR);
                if (misr < 0)
                        return misr;
                misr |=
                        (MII_DP83640_MISR_ANC_INT_EN |
                        MII_DP83640_MISR_DUP_INT_EN |
                        MII_DP83640_MISR_SPD_INT_EN |
                        MII_DP83640_MISR_LINK_INT_EN);
                err = phy_write(phydev, MII_DP83640_MISR, misr);
                if (err < 0)
                        return err;

                micr = phy_read(phydev, MII_DP83640_MICR);
                if (micr < 0)
                        return micr;
                micr |=
                        (MII_DP83640_MICR_OE |
                        MII_DP83640_MICR_IE);
                return phy_write(phydev, MII_DP83640_MICR, micr);
        } else {
                micr = phy_read(phydev, MII_DP83640_MICR);
                if (micr < 0)
                        return micr;
                micr &=
                        ~(MII_DP83640_MICR_OE |
                        MII_DP83640_MICR_IE);
                err = phy_write(phydev, MII_DP83640_MICR, micr);
                if (err < 0)
                        return err;

                misr = phy_read(phydev, MII_DP83640_MISR);
                if (misr < 0)
                        return misr;
                misr &=
                        ~(MII_DP83640_MISR_ANC_INT_EN |
                        MII_DP83640_MISR_DUP_INT_EN |
                        MII_DP83640_MISR_SPD_INT_EN |
                        MII_DP83640_MISR_LINK_INT_EN);
                return phy_write(phydev, MII_DP83640_MISR, misr);
        }
}
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
        struct dp83640_private *dp83640 = phydev->priv;
        struct hwtstamp_config cfg;
        u16 txcfg0, rxcfg0;

        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;

        if (cfg.flags) /* reserved for future extensions */
                return -EINVAL;

        if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
                return -ERANGE;

        dp83640->hwts_tx_en = cfg.tx_type;

        switch (cfg.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                dp83640->hwts_rx_en = 0;
                dp83640->layer = 0;
                dp83640->version = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                dp83640->hwts_rx_en = 1;
                dp83640->layer = LAYER4;
                dp83640->version = 1;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                dp83640->hwts_rx_en = 1;
                dp83640->layer = LAYER4;
                dp83640->version = 2;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                dp83640->hwts_rx_en = 1;
                dp83640->layer = LAYER2;
                dp83640->version = 2;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                dp83640->hwts_rx_en = 1;
                dp83640->layer = LAYER4 | LAYER2;
                dp83640->version = 2;
                break;
        default:
                return -ERANGE;
        }

        txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
        rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;

        if (dp83640->layer & LAYER2) {
                txcfg0 |= TX_L2_EN;
                rxcfg0 |= RX_L2_EN;
        }
        if (dp83640->layer & LAYER4) {
                txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
                rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
        }

        if (dp83640->hwts_tx_en)
                txcfg0 |= TX_TS_EN;

        if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
                txcfg0 |= SYNC_1STEP | CHK_1STEP;

        if (dp83640->hwts_rx_en)
                rxcfg0 |= RX_TS_EN;

        mutex_lock(&dp83640->clock->extreg_lock);

        ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
        ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);

        mutex_unlock(&dp83640->clock->extreg_lock);

        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static void rx_timestamp_work(struct work_struct *work)
{
        struct dp83640_private *dp83640 =
                container_of(work, struct dp83640_private, ts_work);
        struct list_head *this, *next;
        struct rxts *rxts;
        struct skb_shared_hwtstamps *shhwtstamps;
        struct sk_buff *skb;
        unsigned int type;
        unsigned long flags;

        /* Deliver each deferred packet, with or without a time stamp. */

        while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
                type = SKB_PTP_TYPE(skb);
                spin_lock_irqsave(&dp83640->rx_lock, flags);
                list_for_each_safe(this, next, &dp83640->rxts) {
                        rxts = list_entry(this, struct rxts, list);
                        if (match(skb, type, rxts)) {
                                shhwtstamps = skb_hwtstamps(skb);
                                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                                shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
                                list_del_init(&rxts->list);
                                list_add(&rxts->list, &dp83640->rxpool);
                                break;
                        }
                }
                spin_unlock_irqrestore(&dp83640->rx_lock, flags);
                netif_rx_ni(skb);
        }

        /* Clear out expired time stamps. */

        spin_lock_irqsave(&dp83640->rx_lock, flags);
        prune_rx_ts(dp83640);
        spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
static bool dp83640_rxtstamp(struct phy_device *phydev,
                             struct sk_buff *skb, int type)
{
        struct dp83640_private *dp83640 = phydev->priv;

        if (!dp83640->hwts_rx_en)
                return false;

        if (is_status_frame(skb, type)) {
                decode_status_frame(dp83640, skb);
                kfree_skb(skb);
                return true;
        }

        SKB_PTP_TYPE(skb) = type;
        skb_queue_tail(&dp83640->rx_queue, skb);
        schedule_work(&dp83640->ts_work);

        return true;
}
static void dp83640_txtstamp(struct phy_device *phydev,
                             struct sk_buff *skb, int type)
{
        struct dp83640_private *dp83640 = phydev->priv;

        switch (dp83640->hwts_tx_en) {

        case HWTSTAMP_TX_ONESTEP_SYNC:
                if (is_sync(skb, type)) {
                        skb_complete_tx_timestamp(skb, NULL);
                        return;
                }
                /* fall through */
        case HWTSTAMP_TX_ON:
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                skb_queue_tail(&dp83640->tx_queue, skb);
                schedule_work(&dp83640->ts_work);
                break;

        case HWTSTAMP_TX_OFF:
        default:
                skb_complete_tx_timestamp(skb, NULL);
                break;
        }
}
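/*
 * Note: in one-step mode, Sync frames are released right away without a
 * software timestamp, presumably because the phyter inserts the transmit
 * time into the frame on the fly; all other event frames are still
 * queued and matched against timestamps from the status frames.
 */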
static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
{
        struct dp83640_private *dp83640 = dev->priv;

        info->so_timestamping =
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
        info->tx_types =
                (1 << HWTSTAMP_TX_OFF) |
                (1 << HWTSTAMP_TX_ON) |
                (1 << HWTSTAMP_TX_ONESTEP_SYNC);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
                (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
        return 0;
}
static struct phy_driver dp83640_driver = {
        .phy_id         = DP83640_PHY_ID,
        .phy_id_mask    = 0xfffffff0,
        .name           = "NatSemi DP83640",
        .features       = PHY_BASIC_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .probe          = dp83640_probe,
        .remove         = dp83640_remove,
        .config_init    = dp83640_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = dp83640_ack_interrupt,
        .config_intr    = dp83640_config_intr,
        .ts_info        = dp83640_ts_info,
        .hwtstamp       = dp83640_hwtstamp,
        .rxtstamp       = dp83640_rxtstamp,
        .txtstamp       = dp83640_txtstamp,
        .driver         = {.owner = THIS_MODULE,}
};
static int __init dp83640_init(void)
{
        return phy_driver_register(&dp83640_driver);
}

static void __exit dp83640_exit(void)
{
        dp83640_free_clocks();
        phy_driver_unregister(&dp83640_driver);
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
module_init(dp83640_init);
module_exit(dp83640_exit);
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
        { DP83640_PHY_ID, 0xfffffff0 },
        { }
};

MODULE_DEVICE_TABLE(mdio, dp83640_tbl);