// SPDX-License-Identifier: GPL-2.0
/* TI K3 AM65x Common Platform Time Sync
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "am65-cpts.h"

struct am65_genf_regs {
	u32 comp_lo;	/* Comparison Low Value 0:31 */
	u32 comp_hi;	/* Comparison High Value 32:63 */
	u32 control;	/* Control */
	u32 length;	/* Length */
	u32 ppm_low;	/* PPM Load Low Value 0:31 */
	u32 ppm_hi;	/* PPM Load High Value 32:63 */
	u32 ts_nudge;	/* Nudge value */
} __aligned(32) __packed;

#define AM65_CPTS_GENF_MAX_NUM 9
#define AM65_CPTS_ESTF_MAX_NUM 8

struct am65_cpts_regs {
	u32 idver;		/* Identification and version */
	u32 control;		/* Time sync control */
	u32 rftclk_sel;		/* Reference Clock Select Register */
	u32 ts_push;		/* Time stamp event push */
	u32 ts_load_val_lo;	/* Time Stamp Load Low Value 0:31 */
	u32 ts_load_en;		/* Time stamp load enable */
	u32 ts_comp_lo;		/* Time Stamp Comparison Low Value 0:31 */
	u32 ts_comp_length;	/* Time Stamp Comparison Length */
	u32 intstat_raw;	/* Time sync interrupt status raw */
	u32 intstat_masked;	/* Time sync interrupt status masked */
	u32 int_enable;		/* Time sync interrupt enable */
	u32 ts_comp_nudge;	/* Time Stamp Comparison Nudge Value */
	u32 event_pop;		/* Event interrupt pop */
	u32 event_0;		/* Event Time Stamp lo 0:31 */
	u32 event_1;		/* Event Type Fields */
	u32 event_2;		/* Event Type Fields domain */
	u32 event_3;		/* Event Time Stamp hi 32:63 */
	u32 ts_load_val_hi;	/* Time Stamp Load High Value 32:63 */
	u32 ts_comp_hi;		/* Time Stamp Comparison High Value 32:63 */
	u32 ts_add_val;		/* Time Stamp Add value */
	u32 ts_ppm_low;		/* Time Stamp PPM Load Low Value 0:31 */
	u32 ts_ppm_hi;		/* Time Stamp PPM Load High Value 32:63 */
	u32 ts_nudge;		/* Time Stamp Nudge value */
	u32 reserv[33];
	struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];	/* GENF */
	struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];	/* ESTF */
};

/* CONTROL_REG */
#define AM65_CPTS_CONTROL_EN BIT(0)
#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
#define AM65_CPTS_CONTROL_64MODE BIT(5)
#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)

#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)

#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)

/* RFTCLK_SEL_REG */
#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)

/* TS_PUSH_REG */
#define AM65_CPTS_TS_PUSH BIT(0)

/* TS_LOAD_EN_REG */
#define AM65_CPTS_TS_LOAD_EN BIT(0)

/* INTSTAT_RAW_REG */
#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)

/* INTSTAT_MASKED_REG */
#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)

/* INT_ENABLE_REG */
#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)

/* TS_COMP_NUDGE_REG */
#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)

/* EVENT_POP_REG */
#define AM65_CPTS_EVENT_POP BIT(0)

/* EVENT_1_REG */
#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)

#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)

#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)

#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)

/* EVENT_2_REG */
#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)

enum am65_cpts_event_type {
	AM65_CPTS_EV_PUSH,	/* Time Stamp Push Event */
	AM65_CPTS_EV_ROLL,	/* Time Stamp Rollover Event */
	AM65_CPTS_EV_HALF,	/* Time Stamp Half Rollover Event */
	AM65_CPTS_EV_HW,	/* Hardware Time Stamp Push Event */
	AM65_CPTS_EV_RX,	/* Ethernet Receive Event */
	AM65_CPTS_EV_TX,	/* Ethernet Transmit Event */
	AM65_CPTS_EV_TS_COMP,	/* Time Stamp Compare Event */
	AM65_CPTS_EV_HOST,	/* Host Transmit Event */
};

struct am65_cpts_event {
	struct list_head list;
	unsigned long tmo;
	u32 event1;
	u32 event2;
	u64 timestamp;
};

#define AM65_CPTS_FIFO_DEPTH (16)
#define AM65_CPTS_MAX_EVENTS (32)
#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define AM65_CPTS_MIN_PPM 0x400

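/* Note (illustrative): AM65_CPTS_MIN_PPM (0x400 = 1024) bounds the PHC
 * frequency adjustment range; am65_cpts_create() derives
 * max_adj = refclk_freq / 1024, e.g. 500000000 / 1024 = 488281 ppb for
 * an assumed 500 MHz refclk.
 */
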
struct am65_cpts {
	struct device *dev;
	struct am65_cpts_regs __iomem *reg;
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	int phc_index;
	struct clk_hw *clk_mux_hw;
	struct device_node *clk_mux_np;
	struct clk *refclk;
	u32 refclk_freq;
	struct list_head events;
	struct list_head pool;
	struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
	spinlock_t lock;	/* protects events lists */
	u32 ext_ts_inputs;
	u32 genf_num;
	u32 ts_add_val;
	int irq;
	struct mutex ptp_clk_lock;	/* PHC access sync */
	u64 timestamp;
	u32 genf_enable;
	u32 hw_ts_enable;
	u32 estf_enable;
	struct sk_buff_head txq;
	bool pps_enabled;
	bool pps_present;
	u32 pps_hw_ts_idx;
	u32 pps_genf_idx;
	/* context save/restore */
	u64 sr_cpts_ns;
	u64 sr_ktime_ns;
	u32 sr_control;
	u32 sr_int_enable;
	u32 sr_rftclk_sel;
	u32 sr_ts_ppm_hi;
	u32 sr_ts_ppm_low;
	struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
	struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};

struct am65_cpts_skb_cb_data {
	unsigned long tmo;
	u32 skb_mtype_seqid;
};

#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
#define am65_cpts_read32(c, r) readl(&(c)->reg->r)

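/* Example (illustrative): am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push)
 * expands to writel(AM65_CPTS_TS_PUSH, &cpts->reg->ts_push), so register
 * names are checked against struct am65_cpts_regs at compile time.
 */
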
static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
{
	u32 val;

	val = upper_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_hi);
	val = lower_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_lo);

	am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
}

static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
	/* select coefficient according to the rate */
	cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;

	am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}

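/* Worked example (illustrative, assuming a 500 MHz refclk):
 * ts_add_val = (1000000000 / 500000000 - 1) & 0x7 = 1, i.e. the counter
 * advances by ts_add_val + 1 = 2 ns on every refclk cycle.
 */
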
static void am65_cpts_disable(struct am65_cpts *cpts)
{
	am65_cpts_write32(cpts, 0, control);
	am65_cpts_write32(cpts, 0, int_enable);
}

static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
	return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
		AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
}

static int am65_cpts_event_get_type(struct am65_cpts_event *event)
{
	return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
		AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}

static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
{
	struct list_head *this, *next;
	struct am65_cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
				     struct am65_cpts_event *event)
{
	u32 r = am65_cpts_read32(cpts, intstat_raw);

	if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
		event->timestamp = am65_cpts_read32(cpts, event_0);
		event->event1 = am65_cpts_read32(cpts, event_1);
		event->event2 = am65_cpts_read32(cpts, event_2);
		event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
		am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
		return false;
	}

	return true;
}

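/* Note: one FIFO slot is consumed per pop - event_0..event_3 hold the
 * 64-bit timestamp and the type/domain fields of the current entry, and
 * writing AM65_CPTS_EVENT_POP to event_pop advances the FIFO to the next
 * entry.
 */
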
static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
	struct ptp_clock_event pevent;
	struct am65_cpts_event *event;
	bool schedule = false;
	int i, type, ret = 0;

	for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
		event = list_first_entry_or_null(&cpts->pool,
						 struct am65_cpts_event, list);
		if (!event) {
			if (am65_cpts_cpts_purge_events(cpts)) {
				dev_err(cpts->dev, "cpts: event pool empty\n");
				ret = -1;
				goto out;
			}
			continue;
		}

		if (am65_cpts_fifo_pop_event(cpts, event))
			break;

		type = am65_cpts_event_get_type(event);
		switch (type) {
		case AM65_CPTS_EV_PUSH:
			cpts->timestamp = event->timestamp;
			dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
				cpts->timestamp);
			break;
		case AM65_CPTS_EV_RX:
		case AM65_CPTS_EV_TX:
			event->tmo = jiffies +
				msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

			list_move_tail(&event->list, &cpts->events);

			dev_dbg(cpts->dev,
				"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
				event->event1, event->event2,
				event->timestamp);
			schedule = true;
			break;
		case AM65_CPTS_EV_HW:
			pevent.index = am65_cpts_event_get_port(event) - 1;
			pevent.timestamp = event->timestamp;
			if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) {
				pevent.type = PTP_CLOCK_PPSUSR;
				pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp);
			} else {
				pevent.type = PTP_CLOCK_EXTTS;
			}
			dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
				pevent.type == PTP_CLOCK_EXTTS ?
				"extts" : "pps",
				pevent.index, event->timestamp);

			ptp_clock_event(cpts->ptp_clock, &pevent);
			break;
		case AM65_CPTS_EV_HOST:
			break;
		case AM65_CPTS_EV_ROLL:
		case AM65_CPTS_EV_HALF:
		case AM65_CPTS_EV_TS_COMP:
			dev_dbg(cpts->dev,
				"AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
				type,
				event->event1, event->event2,
				event->timestamp);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			ret = -1;
			goto out;
		}
	}

out:
	if (schedule)
		ptp_schedule_worker(cpts->ptp_clock, 0);

	return ret;
}

static int am65_cpts_fifo_read(struct am65_cpts *cpts)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cpts->lock, flags);
	ret = __am65_cpts_fifo_read(cpts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ret;
}

static u64 am65_cpts_gettime(struct am65_cpts *cpts,
			     struct ptp_system_timestamp *sts)
{
	unsigned long flags;
	u64 val = 0;

	/* temporarily disable cpts interrupt to avoid an unintended
	 * doubled read. Interrupt can be in-flight - it's Ok.
	 */
	am65_cpts_write32(cpts, 0, int_enable);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
	am65_cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	am65_cpts_fifo_read(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	val = cpts->timestamp;

	return val;
}

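/* The pushed timestamp is not read back from a register directly: it is
 * delivered through the event FIFO as AM65_CPTS_EV_PUSH and latched into
 * cpts->timestamp by __am65_cpts_fifo_read(), which is why
 * am65_cpts_gettime() reads the FIFO before returning cpts->timestamp.
 */
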
static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
{
	struct am65_cpts *cpts = dev_id;

	if (am65_cpts_fifo_read(cpts))
		dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");

	return IRQ_HANDLED;
}

/* PTP clock operations */
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	int pps_index = cpts->pps_genf_idx;
	u64 adj_period, pps_adj_period;
	u32 ctrl_val, ppm_hi, ppm_low;
	unsigned long flags;
	int neg_adj = 0, i;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* base freq = 1GHz = 1 000 000 000
	 * ppb_norm = ppb * base_freq / clock_freq;
	 * ppm_norm = ppb_norm / 1000
	 * adj_period = 1 000 000 / ppm_norm
	 * adj_period = 1 000 000 000 / ppb_norm
	 * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
	 * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
	 * adj_period = clock_freq / ppb
	 */
	adj_period = div_u64(cpts->refclk_freq, ppb);

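	/* Worked example (illustrative, assuming refclk_freq = 500 MHz):
	 * ppb = 100 gives adj_period = 500000000 / 100 = 5000000, i.e. a
	 * 1 ns correction is applied once every 5000000 refclk cycles
	 * (every 10 ms at 2 ns per cycle), which is 100 ns per second,
	 * i.e. a 100 ppb correction.
	 */
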
	mutex_lock(&cpts->ptp_clk_lock);

	ctrl_val = am65_cpts_read32(cpts, control);
	if (neg_adj)
		ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
	else
		ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;

	ppm_hi = upper_32_bits(adj_period) & 0x3FF;
	ppm_low = lower_32_bits(adj_period);

	if (cpts->pps_enabled) {
		estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
		if (neg_adj)
			estf_ctrl_val &= ~BIT(1);
		else
			estf_ctrl_val |= BIT(1);

		/* GenF PPM will do correction using the cpts refclk tick,
		 * which is (cpts->ts_add_val + 1) ns, so the GenF length PPM
		 * adj period needs to be corrected accordingly.
		 */
		pps_adj_period = adj_period * (cpts->ts_add_val + 1);
		estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
		estf_ppm_low = lower_32_bits(pps_adj_period);
	}

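	/* Illustrative: with ts_add_val = 1 (2 ns tick, assuming a 500 MHz
	 * refclk), pps_adj_period is simply twice adj_period, expressing
	 * the same correction interval in units of the GenF refclk tick.
	 */
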
	spin_lock_irqsave(&cpts->lock, flags);

	/* All writes below must be done extremely fast:
	 *  - a delay between the PPM dir and PPM value changes can cause an
	 *    error due to the old PPM correction being applied in the wrong
	 *    direction
	 *  - a delay between the CPTS-clock PPM cfg and the GenF PPM cfg can
	 *    cause an error due to the CPTS-clock PPM running with the new
	 *    cfg while the GenF PPM still uses the old one for a short time
	 */
	am65_cpts_write32(cpts, ctrl_val, control);
	am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
	am65_cpts_write32(cpts, ppm_low, ts_ppm_low);

	if (cpts->pps_enabled) {
		am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control);
		am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi);
		am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low);
	}

	for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
		if (cpts->estf_enable & BIT(i)) {
			am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control);
			am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi);
			am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low);
		}
	}
	/* All GenF/EstF can be updated here the same way */
	spin_unlock_irqrestore(&cpts->lock, flags);

	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	s64 ns;

	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, NULL);
	ns += delta;
	am65_cpts_settime(cpts, ns);
	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, sts);
	mutex_unlock(&cpts->ptp_clk_lock);
	*ts = ns_to_timespec64(ns);

	return 0;
}

u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
{
	u64 ns;

	/* reuse ptp_clk_lock as it serializes ts push */
	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, NULL);
	mutex_unlock(&cpts->ptp_clk_lock);

	return ns;
}
EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);

static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	ns = timespec64_to_ns(ts);
	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_settime(cpts, ns);
	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
{
	u32 v;

	v = am65_cpts_read32(cpts, control);
	if (on) {
		v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	am65_cpts_write32(cpts, v, control);
}

static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
	if (index >= cpts->ptp_info.n_ext_ts)
		return -ENXIO;

	if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
		return -EINVAL;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_extts_enable_hw(cpts, index, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
		__func__, index, on ? "enabled" : "disabled");

	return 0;
}

int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
			  struct am65_cpts_estf_cfg *cfg)
{
	u64 cycles;
	u32 val;

	cycles = cfg->ns_period * cpts->refclk_freq;
	cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
	if (cycles > U32_MAX)
		return -EINVAL;

	/* according to TRM should be zeroed */
	am65_cpts_write32(cpts, 0, estf[idx].length);

	val = upper_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_hi);
	val = lower_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_lo);
	val = lower_32_bits(cycles);
	am65_cpts_write32(cpts, val, estf[idx].length);
	am65_cpts_write32(cpts, 0, estf[idx].control);
	am65_cpts_write32(cpts, 0, estf[idx].ppm_hi);
	am65_cpts_write32(cpts, 0, estf[idx].ppm_low);

	cpts->estf_enable |= BIT(idx);

	dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);

	return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);

void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
	am65_cpts_write32(cpts, 0, estf[idx].length);
	cpts->estf_enable &= ~BIT(idx);

	dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);

static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
				       struct ptp_perout_request *req, int on)
{
	u64 ns_period, ns_start, cycles;
	struct timespec64 ts;
	u32 val;

	if (on) {
		ts.tv_sec = req->period.sec;
		ts.tv_nsec = req->period.nsec;
		ns_period = timespec64_to_ns(&ts);

		cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;

		ts.tv_sec = req->start.sec;
		ts.tv_nsec = req->start.nsec;
		ns_start = timespec64_to_ns(&ts);

		val = upper_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
		val = lower_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
		val = lower_32_bits(cycles);
		am65_cpts_write32(cpts, val, genf[req->index].length);

		am65_cpts_write32(cpts, 0, genf[req->index].control);
		am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
		am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);

		cpts->genf_enable |= BIT(req->index);
	} else {
		am65_cpts_write32(cpts, 0, genf[req->index].length);

		cpts->genf_enable &= ~BIT(req->index);
	}
}

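/* Illustrative sizing, assuming a 500 MHz refclk: a 1 Hz periodic output
 * (ns_period = 1000000000) gives cycles = 1000000000 * 500000000 /
 * 1000000000 = 500000000 refclk cycles per GenF period, which still fits
 * the 32-bit length register.
 */
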
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
				   struct ptp_perout_request *req, int on)
{
	if (req->index >= cpts->ptp_info.n_per_out)
		return -ENXIO;

	if (cpts->pps_present && req->index == cpts->pps_genf_idx)
		return -EINVAL;

	if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_perout_enable_hw(cpts, req, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
		__func__, req->index, on ? "enabled" : "disabled");

	return 0;
}

static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
{
	int ret = 0;
	struct timespec64 ts;
	struct ptp_clock_request rq;
	u64 ns;

	if (!cpts->pps_present)
		return -EINVAL;

	if (cpts->pps_enabled == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);

	if (on) {
		am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);

		ns = am65_cpts_gettime(cpts, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
		rq.perout.index = cpts->pps_genf_idx;

		am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
		cpts->pps_enabled = true;
	} else {
		rq.perout.index = cpts->pps_genf_idx;
		am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
		am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
		cpts->pps_enabled = false;
	}

	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: pps: %s\n",
		__func__, on ? "enabled" : "disabled");

	return ret;
}

static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq, int on)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return am65_cpts_extts_enable(cpts, rq->extts.index, on);
	case PTP_CLK_REQ_PEROUT:
		return am65_cpts_perout_enable(cpts, &rq->perout, on);
	case PTP_CLK_REQ_PPS:
		return am65_cpts_pps_enable(cpts, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static long am65_cpts_ts_work(struct ptp_clock_info *ptp);

static struct ptp_clock_info am65_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.adjfine	= am65_cpts_ptp_adjfine,
	.adjtime	= am65_cpts_ptp_adjtime,
	.gettimex64	= am65_cpts_ptp_gettimex,
	.settime64	= am65_cpts_ptp_settime,
	.enable		= am65_cpts_ptp_enable,
	.do_aux_work	= am65_cpts_ts_work,
};

static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
				  struct am65_cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->event1 &
		      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
		       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
		       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	/* no need to grab txq.lock as access is always done under cpts->lock */
	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct am65_cpts_skb_cb_data *skb_cb =
					(struct am65_cpts_skb_cb_data *)skb->cb;

		if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
		    ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
		     (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
			mtype_seqid = skb_cb->skb_mtype_seqid;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			u64 ns = event->timestamp;

			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev,
				"match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* timeout any expired skbs over 100 ms */
			dev_dbg(cpts->dev,
				"expiring tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

static void am65_cpts_find_ts(struct am65_cpts *cpts)
{
	struct am65_cpts_event *event;
	struct list_head *this, *next;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (am65_cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	unsigned long flags;
	long delay = -1;

	am65_cpts_find_ts(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq))
		delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return delay;
}

static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	*mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
			AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
	*mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	return 1;
}

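/* Encoding example (illustrative): a PTP Delay_Req (msgtype 1) with
 * sequence id 0x1234 yields *mtype_seqid = (1 << 16) | 0x1234 = 0x00011234;
 * the EVENT_TYPE field (bits 23:20) is OR-ed in later by the RX/TX paths.
 */
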
static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct am65_cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	spin_lock_irqsave(&cpts->lock, flags);
	__am65_cpts_fifo_read(cpts);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (time_after(jiffies, event->tmo)) {
			list_move(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->event1 &
			      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
			       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
			       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_move(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
	 * skb MAC Hdr properties are not configured yet. Hence we need to
	 * reset the skb MAC header here.
	 */
	skb_reset_mac_header(skb);
	ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return; /* if not PTP class packet */

	skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);

	ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);

/**
 * am65_cpts_tx_timestamp - save tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function saves a tx packet for timestamping if the packet can be
 * timestamped. The actual processing is done later by the PTP auxiliary
 * worker.
 */
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	/* add frame to queue for processing later.
	 * The periodic FIFO check will handle this.
	 */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(100);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->ptp_clock, 0);
}
EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);

/**
 * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function should be called from .xmit().
 * It checks if the packet can be timestamped, fills internal cpts data
 * in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
 */
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;
	skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
				   AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);

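/* Typical call flow (illustrative sketch; the MAC driver function name is
 * hypothetical):
 *
 *	my_ndo_start_xmit():
 *		am65_cpts_prep_tx_timestamp(cpts, skb);	classify + mark
 *		...queue skb to the hardware...
 *		am65_cpts_tx_timestamp(cpts, skb);	hold skb for TX event
 *
 * The matching of TX events against queued skbs then happens
 * asynchronously in am65_cpts_ts_work().
 */
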
int am65_cpts_phc_index(struct am65_cpts *cpts)
{
	return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);

static void cpts_free_clk_mux(void *data)
{
	struct am65_cpts *cpts = data;

	of_clk_del_provider(cpts->clk_mux_np);
	clk_hw_unregister_mux(cpts->clk_mux_hw);
	of_node_put(cpts->clk_mux_np);
}

static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
				 struct device_node *node)
{
	unsigned int num_parents;
	const char **parent_names;
	char *clk_mux_name;
	void __iomem *reg;
	int ret = -EINVAL;

	cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
	if (!cpts->clk_mux_np)
		return 0;

	num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
			cpts->clk_mux_np);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
				    GFP_KERNEL);
	if (!parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);

	clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
				      dev_name(cpts->dev), cpts->clk_mux_np);
	if (!clk_mux_name) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	reg = &cpts->reg->rftclk_sel;
	/* dev must be NULL to avoid recursive incrementing
	 * of module refcnt
	 */
	cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
					       parent_names, num_parents,
					       0, reg, 0, 5, 0, NULL);
	if (IS_ERR(cpts->clk_mux_hw)) {
		ret = PTR_ERR(cpts->clk_mux_hw);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
				     cpts->clk_mux_hw);
	if (ret)
		goto clk_hw_register;

	ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
	if (ret)
		dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);

	return ret;

clk_hw_register:
	clk_hw_unregister_mux(cpts->clk_mux_hw);
mux_fail:
	of_node_put(cpts->clk_mux_np);
	return ret;
}

static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
{
	u32 prop[2];

	if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
		cpts->ext_ts_inputs = prop[0];

	if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
		cpts->genf_num = prop[0];

	if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
		cpts->pps_present = true;

		if (prop[0] > 7) {
			dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]);
			cpts->pps_present = false;
		}
		if (prop[1] > 8) {
			dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]);
			cpts->pps_present = false;
		}
		if (cpts->pps_present) {
			cpts->pps_hw_ts_idx = prop[0];
			cpts->pps_genf_idx = prop[1];
		}
	}

	return cpts_of_mux_clk_setup(cpts, node);
}

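/* Illustrative device-tree fragment for the properties parsed above
 * (node name and values are examples only):
 *
 *	cpts {
 *		ti,cpts-ext-ts-inputs = <4>;
 *		ti,cpts-periodic-outputs = <2>;
 *		ti,pps = <3 1>;	(HW4_TS_PUSH input, GENF1 output)
 *	};
 */
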
void am65_cpts_release(struct am65_cpts *cpts)
{
	ptp_clock_unregister(cpts->ptp_clock);
	am65_cpts_disable(cpts);
	clk_disable_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(am65_cpts_release);

struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
				   struct device_node *node)
{
	struct am65_cpts *cpts;
	int ret, i;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct am65_cpts_regs __iomem *)regs;

	cpts->irq = of_irq_get_byname(node, "cpts");
	if (cpts->irq <= 0) {
		ret = cpts->irq ?: -ENXIO;
		dev_err_probe(dev, ret, "Failed to get IRQ number\n");
		return ERR_PTR(ret);
	}

	ret = am65_cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&cpts->ptp_clk_lock);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	spin_lock_init(&cpts->lock);
	skb_queue_head_init(&cpts->txq);

	for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk)) {
		ret = PTR_ERR(cpts->refclk);
		dev_err_probe(dev, ret, "Failed to get refclk\n");
		return ERR_PTR(ret);
	}

	ret = clk_prepare_enable(cpts->refclk);
	if (ret) {
		dev_err(dev, "Failed to enable refclk %d\n", ret);
		return ERR_PTR(ret);
	}

	cpts->refclk_freq = clk_get_rate(cpts->refclk);

	am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
	cpts->ptp_info = am65_ptp_info;

	if (cpts->ext_ts_inputs)
		cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
	if (cpts->genf_num)
		cpts->ptp_info.n_per_out = cpts->genf_num;
	if (cpts->pps_present)
		cpts->ptp_info.pps = 1;

	am65_cpts_set_add_val(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
			  AM65_CPTS_CONTROL_64MODE |
			  AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
			  control);
	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	/* set time to the current system time */
	am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));

	cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
	if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
		dev_err(dev, "Failed to register ptp clk %ld\n",
			PTR_ERR(cpts->ptp_clock));
		ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
		goto refclk_disable;
	}
	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);

	ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
					am65_cpts_interrupt,
					IRQF_ONESHOT, dev_name(dev), cpts);
	if (ret) {
		dev_err(cpts->dev, "error attaching irq %d\n", ret);
		goto reset_ptpclk;
	}

	dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
		 am65_cpts_read32(cpts, idver),
		 cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);

	return cpts;

reset_ptpclk:
	/* am65_cpts_release() already disables the refclk, so return here
	 * instead of falling through and disabling it twice.
	 */
	am65_cpts_release(cpts);
	return ERR_PTR(ret);
refclk_disable:
	clk_disable_unprepare(cpts->refclk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);

void am65_cpts_suspend(struct am65_cpts *cpts)
{
	/* save state and disable CPTS */
	cpts->sr_control = am65_cpts_read32(cpts, control);
	cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
	cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
	cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
	cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
	cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
	cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
	am65_cpts_disable(cpts);
	clk_disable(cpts->refclk);

	/* Save GENF state */
	memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));

	/* Save ESTF state */
	memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
}
EXPORT_SYMBOL_GPL(am65_cpts_suspend);

void am65_cpts_resume(struct am65_cpts *cpts)
{
	int i;
	s64 ktime_ns;

	/* restore state and enable CPTS */
	clk_enable(cpts->refclk);
	am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
	am65_cpts_set_add_val(cpts);
	am65_cpts_write32(cpts, cpts->sr_control, control);
	am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);

	/* Restore time to saved CPTS time + time in suspend/resume */
	ktime_ns = ktime_to_ns(ktime_get_real());
	ktime_ns -= cpts->sr_ktime_ns;
	am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);

	/* Restore compensation (PPM) */
	am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
	am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);

	/* Restore GENF state */
	for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
		am65_cpts_write32(cpts, 0, genf[i].length);	/* TRM sequence */
		am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
		am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
		am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
		am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
		am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
		am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
	}

	/* Restore ESTF state */
	for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
		am65_cpts_write32(cpts, 0, estf[i].length);	/* TRM sequence */
		am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
		am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
		am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
		am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
		am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
		am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
	}
}
EXPORT_SYMBOL_GPL(am65_cpts_resume);

static int am65_cpts_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct am65_cpts *cpts;
	void __iomem *base;

	base = devm_platform_ioremap_resource_byname(pdev, "cpts");
	if (IS_ERR(base))
		return PTR_ERR(base);

	cpts = am65_cpts_create(dev, base, node);
	return PTR_ERR_OR_ZERO(cpts);
}

static const struct of_device_id am65_cpts_of_match[] = {
	{ .compatible = "ti,am65-cpts", },
	{ .compatible = "ti,j721e-cpts", },
	{},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);

static struct platform_driver am65_cpts_driver = {
	.probe		= am65_cpts_probe,
	.driver		= {
		.name		= "am65-cpts",
		.of_match_table	= am65_cpts_of_match,
	},
};
module_platform_driver(am65_cpts_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");