/**
 * 1588 PTP support for Cadence GEM device.
 *
 * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
 *
 * Authors: Rafal Ozieblo <rafalo@cadence.com>
 *          Bartosz Folta <bfolta@cadence.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/ptp_classify.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>

#include "macb.h"
#define GEM_PTP_TIMER_NAME "gem-ptp-timer"
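
/* The hardware places the PTP timestamp words directly behind the basic DMA
 * descriptor, and behind the upper-address words when 64-bit addressing is
 * also enabled; macb_ptp_desc() resolves that offset, or returns NULL when
 * the descriptors carry no timestamp words.
 */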
static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
					       struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc));
	if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc)
				+ sizeof(struct macb_dma_desc_64));
	return NULL;
}
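
/* The TSU nanoseconds register (TN) is sampled before and after the seconds
 * registers; if nanoseconds rolled over into seconds between the two samples,
 * the registers are re-read so the returned timespec stays consistent.
 */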
static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	long first, second;
	u32 secl, sech;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	first = gem_readl(bp, TN);
	secl = gem_readl(bp, TSL);
	sech = gem_readl(bp, TSH);
	second = gem_readl(bp, TN);

	/* test for nsec rollover */
	if (first > second) {
		/* if so, use later read & re-read seconds
		 * (assume all done within 1s)
		 */
		ts->tv_nsec = gem_readl(bp, TN);
		secl = gem_readl(bp, TSL);
		sech = gem_readl(bp, TSH);
	} else {
		ts->tv_nsec = first;
	}

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
	ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
			& TSU_SEC_MAX_VAL;
	return 0;
}
static int gem_tsu_set_time(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	u32 ns, sech, secl;

	secl = (u32)ts->tv_sec;
	sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
	ns = ts->tv_nsec;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);

	/* TSH doesn't latch the time and no atomicity! */
	gem_writel(bp, TN, 0); /* clear to avoid overflow */
	gem_writel(bp, TSH, sech);
	/* write lower bits 2nd, for synchronized secs update */
	gem_writel(bp, TSL, secl);
	gem_writel(bp, TN, ns);

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}
static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
{
	unsigned long flags;

	/* tsu_timer_incr register must be written after
	 * the tsu_timer_incr_sub_ns register and the write operation
	 * will cause the value written to the tsu_timer_incr_sub_ns register
	 * to take effect.
	 */
	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
	gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}
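
/* Frequency adjustment operates on the increment as an 8.16 fixed-point
 * value: word = (ns << 16) + sub_ns, while scaled_ppm carries the requested
 * ppm offset with a 16-bit fraction, so the applied delta is
 * scaled_ppm * word / (2^16 * USEC_PER_SEC).
 *
 * Worked example (illustrative 250 MHz TSU clock, i.e. ns = 4, sub_ns = 0,
 * word = 0x40000): a +100 ppm request means scaled_ppm = 100 * 65536 =
 * 6553600, and the code below computes
 * delta = ((6553600 * 0x40000 + 500000) >> 16) / 1000000 = 26,
 * so the increment becomes 4 + 26/65536 ns, roughly 4.0004 ns.
 */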
static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct tsu_incr incr_spec;
	bool neg_adj = false;
	u32 word;
	u64 adj;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* Adjustment is relative to base frequency */
	incr_spec.sub_ns = bp->tsu_incr.sub_ns;
	incr_spec.ns = bp->tsu_incr.ns;

	/* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
	word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
	adj = (u64)scaled_ppm * word;
	/* Divide with rounding, equivalent to floating dividing:
	 * (temp / USEC_PER_SEC) + 0.5
	 */
	adj += (USEC_PER_SEC >> 1);
	adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
	adj = div_u64(adj, USEC_PER_SEC);
	adj = neg_adj ? (word - adj) : (word + adj);

	incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
			& ((1 << GEM_NSINCR_SIZE) - 1);
	incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
	gem_tsu_incr_set(bp, &incr_spec);
	return 0;
}
static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct timespec64 now, then = ns_to_timespec64(delta);
	u32 adj, sign = 0;

	if (delta < 0) {
		sign = 1;
		delta = -delta;
	}

	if (delta > TSU_NSEC_MAX_VAL) {
		gem_tsu_get_time(&bp->ptp_clock_info, &now);
		if (sign)
			now = timespec64_sub(now, then);
		else
			now = timespec64_add(now, then);

		gem_tsu_set_time(&bp->ptp_clock_info,
				 (const struct timespec64 *)&now);
	} else {
		adj = (sign << GEM_ADDSUB_OFFSET) | delta;

		gem_writel(bp, TA, adj);
	}

	return 0;
}
static int gem_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
static struct ptp_clock_info gem_ptp_caps_template = {
	.owner		= THIS_MODULE,
	.name		= GEM_PTP_TIMER_NAME,
	.adjfine	= gem_ptp_adjfine,
	.adjtime	= gem_ptp_adjtime,
	.gettime64	= gem_tsu_get_time,
	.settime64	= gem_tsu_set_time,
	.enable		= gem_ptp_enable,
};
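
/* The base increment is the TSU clock period split into an integer nanosecond
 * part and a 16-bit sub-nanosecond fraction. For example (illustrative rate),
 * a 300 MHz TSU clock gives ns = 3 and
 * sub_ns = (100000000 << 16) / 300000000 = 21845, i.e. roughly 3.333 ns.
 */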
static void gem_ptp_init_timer(struct macb *bp)
{
	u32 rem = 0;
	u64 adj;

	bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
	if (rem) {
		adj = rem;
		adj <<= GEM_SUBNSINCR_SIZE;
		bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
	} else {
		bp->tsu_incr.sub_ns = 0;
	}
}
static void gem_ptp_init_tsu(struct macb *bp)
{
	struct timespec64 ts;

	/* 1. get current system time */
	ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));

	/* 2. set ptp timer */
	gem_tsu_set_time(&bp->ptp_clock_info, &ts);

	/* 3. set PTP timer increment value to BASE_INCREMENT */
	gem_tsu_incr_set(bp, &bp->tsu_incr);

	gem_writel(bp, TA, 0);
}
static void gem_ptp_clear_timer(struct macb *bp)
{
	bp->tsu_incr.sub_ns = 0;
	bp->tsu_incr.ns = 0;

	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
	gem_writel(bp, TI, GEM_BF(NSINCR, 0));
	gem_writel(bp, TA, 0);
}
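
/* Descriptor timestamps carry only the nanoseconds plus the few low seconds
 * bits covered by GEM_DMA_SEC_MASK; gem_hw_timestamp() merges in the upper
 * seconds bits from the TSU and compensates for the case where the TSU
 * seconds have already wrapped past the descriptor's short seconds field.
 */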
static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
			    u32 dma_desc_ts_2, struct timespec64 *ts)
{
	struct timespec64 tsu;

	ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
			GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
	ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);

	/* TSU overlapping workaround
	 * The timestamp only contains lower few bits of seconds,
	 * so add value from 1588 timer
	 */
	gem_tsu_get_time(&bp->ptp_clock_info, &tsu);

	/* If the top bit is set in the timestamp,
	 * but not in 1588 timer, it has rolled over,
	 * so subtract max size
	 */
	if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
	    !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
		ts->tv_sec -= GEM_DMA_SEC_TOP;

	ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);

	return 0;
}
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
		     struct macb_dma_desc *desc)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct macb_dma_desc_ptp *desc_ptp;
	struct timespec64 ts;

	if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
		desc_ptp = macb_ptp_desc(bp, desc);
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	}
}
static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
			  struct macb_dma_desc_ptp *desc_ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;

	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_tstamp_tx(skb, &shhwtstamps);
}
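
/* TX timestamps are handed over through a per-queue circular buffer:
 * gem_ptp_txstamp() (producer) copies the descriptor timestamp words and
 * advances tx_ts_head, and the tx_ts_task work item (consumer) drains entries
 * and advances tx_ts_tail. The smp_store_release()/smp_load_acquire() pair on
 * the indices orders the entry contents against the index updates, so no lock
 * is taken on this path.
 */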
int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
		    struct macb_dma_desc *desc)
{
	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
	unsigned long head = queue->tx_ts_head;
	struct macb_dma_desc_ptp *desc_ptp;
	struct gem_tx_ts *tx_timestamp;

	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
		return -EINVAL;

	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
		return -ENOMEM; /* no room for another entry */

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	desc_ptp = macb_ptp_desc(queue->bp, desc);
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
	/* move head */
	smp_store_release(&queue->tx_ts_head,
			  (head + 1) & (PTP_TS_BUFFER_SIZE - 1));

	schedule_work(&queue->tx_ts_task);
	return 0;
}
static void gem_tx_timestamp_flush(struct work_struct *work)
{
	struct macb_queue *queue =
			container_of(work, struct macb_queue, tx_ts_task);
	unsigned long head, tail;
	struct gem_tx_ts *tx_ts;

	/* take current head */
	head = smp_load_acquire(&queue->tx_ts_head);
	tail = queue->tx_ts_tail;

	while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
		tx_ts = &queue->tx_timestamps[tail];
		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
		dev_kfree_skb_any(tx_ts->skb);
		/* remove old tail */
		smp_store_release(&queue->tx_ts_tail,
				  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
		tail = queue->tx_ts_tail;
	}
}
void gem_ptp_init(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;

	bp->ptp_clock_info = gem_ptp_caps_template;

	/* nominal frequency and maximum adjustment in ppb */
	bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
	bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
	gem_ptp_init_timer(bp);
	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		pr_err("ptp clock register failed: %ld\n",
			PTR_ERR(bp->ptp_clock));
		bp->ptp_clock = NULL;
		return;
	} else if (bp->ptp_clock == NULL) {
		pr_err("ptp clock register failed\n");
		return;
	}

	spin_lock_init(&bp->tsu_clk_lock);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue->tx_ts_head = 0;
		queue->tx_ts_tail = 0;
		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
	}

	gem_ptp_init_tsu(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
		 GEM_PTP_TIMER_NAME);
}
void gem_ptp_remove(struct net_device *ndev)
{
	struct macb *bp = netdev_priv(ndev);

	if (bp->ptp_clock)
		ptp_clock_unregister(bp->ptp_clock);

	gem_ptp_clear_timer(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
		 GEM_PTP_TIMER_NAME);
}
static int gem_ptp_set_ts_mode(struct macb *bp,
			       enum macb_bd_control tx_bd_control,
			       enum macb_bd_control rx_bd_control)
{
	gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
	gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));

	return 0;
}
int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
{
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}
static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
	u32 reg_val;

	reg_val = macb_readl(bp, NCR);

	if (enable)
		macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
	else
		macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));

	return 0;
}
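
/* Illustrative user-space usage (not part of this driver): timestamping is
 * requested through the SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * where "eth0" and fd are example values. gem_set_hwtst() below maps the
 * requested tx_type/rx_filter onto the GEM descriptor timestamping modes.
 */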
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
	enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);
	u32 regval;

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_from_user(tstamp_config, ifr->ifr_data,
			   sizeof(*tstamp_config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (tstamp_config->flags)
		return -EINVAL;

	switch (tstamp_config->tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (gem_ptp_set_one_step_sync(bp, 1) != 0)
			return -ERANGE;
	case HWTSTAMP_TX_ON:
		tx_bd_control = TSTAMP_ALL_FRAMES;
		break;
	default:
		return -ERANGE;
	}

	switch (tstamp_config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		regval = macb_readl(bp, NCR);
		macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		rx_bd_control = TSTAMP_ALL_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
		return -ERANGE;

	if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}