/*
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus_trace.h"
/*
 * Minimum inter-strobe value of one millisecond is chosen because it
 * just about fits the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the number of
 * bits the SVC can fit into a 16 bit up-counter. The SVC configures the
 * timer in microseconds, so the maximum allowable value is 65535
 * microseconds. We clip that value to 10000 microseconds for the sake of
 * using nice round base 10 numbers, and since right now there's no
 * imaginable use case requiring anything other than a one millisecond
 * inter-strobe time, let alone something higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000
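
/*
 * For illustration: a 16 bit microsecond up-counter tops out at
 * 2^16 - 1 = 65535 us, and the 1000 us strobe delay chosen here is also
 * exactly one jiffy on an AP running with HZ == 1000.
 */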
/* Work queue timers: long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG	msecs_to_jiffies(10)
#define GB_TIMESYNC_DELAYED_WORK_SHORT	msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC	msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE	msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION 15

/* Maximum number of times we'll retry a failed synchronous sync */
#define GB_TIMESYNC_MAX_RETRIES		5
/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;
static void gb_timesync_worker(struct work_struct *work);
/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;
	struct timespec ts;
};
struct gb_timesync_svc {
	struct list_head list;
	struct list_head interface_list;
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;
	bool offset_down;
	bool print_ping;
	bool capture_ping;
	int strobe;

	/* Upper layer TIMESYNC state */
	int state;
};
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;
};
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;
};
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID		= 0,
	GB_TIMESYNC_STATE_INACTIVE		= 1,
	GB_TIMESYNC_STATE_INIT			= 2,
	GB_TIMESYNC_STATE_WAIT_SVC		= 3,
	GB_TIMESYNC_STATE_AUTHORITATIVE		= 4,
	GB_TIMESYNC_STATE_PING			= 5,
	GB_TIMESYNC_STATE_ACTIVE		= 6,
};
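
/*
 * Valid transitions, as enforced by gb_timesync_set_state() below:
 * INIT -> WAIT_SVC -> AUTHORITATIVE -> ACTIVE, with ACTIVE <-> PING for
 * verification pings. INVALID and INACTIVE may be entered from any state,
 * and INIT may be re-entered from any state except INVALID.
 */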
static void gb_timesync_ktime_timer_fn(unsigned long data);
static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
				    u64 counts)
{
	if (timesync_svc->offset_down)
		return counts - timesync_svc->frame_time_offset;

	return counts + timesync_svc->frame_time_offset;
}
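
/*
 * For example, if the SVC reported FrameTime 1000100 when the local counter
 * read 1000000, frame_time_offset is 100 with offset_down == false, and a
 * local count of 2000000 is reported upwards as the FrameTime 2000100.
 */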
/*
 * This function provides the authoritative FrameTime to a calling function. It
 * is designed to be lockless and should remain that way; the caller is assumed
 * to be state-aware.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	u64 clocks = gb_timesync_platform_get_counter();

	return gb_timesync_adjust_count(timesync_svc, clocks);
}
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
static u64 gb_timesync_diff(u64 x, u64 y)
{
	if (x > y)
		return x - y;

	return y - x;
}
static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
				      u64 svc_frame_time, u64 ap_frame_time)
{
	if (svc_frame_time > ap_frame_time) {
		svc->frame_time_offset = svc_frame_time - ap_frame_time;
		svc->offset_down = false;
	} else {
		svc->frame_time_offset = ap_frame_time - svc_frame_time;
		svc->offset_down = true;
	}
}
/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec.
 * Requires the calling context to hold timesync_svc->mutex.
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}
/*
 * Find the two pulses that best match our expected inter-strobe gap and
 * then calculate the difference between the SVC time at the second pulse
 * and the local time at the second pulse.
 */
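
/*
 * For example, at the 19.2 MHz example rate used elsewhere in this file,
 * strobes sent 1000 us apart should arrive ~19200 counts apart; the pulse
 * pair whose gap, scaled to nanoseconds, deviates least from 1000000 ns is
 * taken as the best match.
 */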
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
					       *timesync_svc, int ret)
{
	if (ret == -EAGAIN) {
		gb_timesync_set_state(timesync_svc, timesync_svc->state);
	} else {
		pr_err("Failed to lock timesync bus %d\n", ret);
		gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
	}
}
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC.
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative().
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(interface,
							  svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
	int ret = 0;

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INVALID:
	case GB_TIMESYNC_STATE_INACTIVE:
		ret = -ENODEV;
		break;
	case GB_TIMESYNC_STATE_INIT:
	case GB_TIMESYNC_STATE_WAIT_SVC:
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		ret = -EAGAIN;
		break;
	case GB_TIMESYNC_STATE_PING:
	case GB_TIMESYNC_STATE_ACTIVE:
		break;
	}
	return ret;
}
/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2MHz
 * as an example this means we have 52.083333~ nanoseconds per clock or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the 0.0833 component produce
 * noticeable errors. For example a time difference of one second would
 * lose 19200000 * 0.08333x nanoseconds, or about 1.59 milliseconds.
 *
 * In contrast, calculating in femtoseconds the same example of 19200000 *
 * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
 *
 * Continuing the example of 19.2 MHz we cap the maximum error difference
 * at a worst-case 0.3 microseconds over a potential calculation window of
 * about 15 seconds, meaning you can convert a FrameTime that is <= 15
 * seconds older/younger than the reference time with a maximum error of
 * 0.2385 microseconds. Note that 19.2 MHz is an example frequency, not a
 * requirement.
 */
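
/*
 * Worked example at 19.2 MHz: gb_timesync_fs_per_clock is
 * 10^15 / 19200000 ~= 52083333 fs. A difference of 1920 counts is then
 * 1920 * 52083333 = 99999999360 fs, i.e. roughly 100 us, or zero whole
 * seconds and ~100000 ns in timespec terms.
 */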
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
				   u64 frame_time, struct timespec *ts)
{
	unsigned long flags;
	u64 delta_fs, counts, sec, nsec;
	bool add;
	int ret;

	memset(ts, 0x00, sizeof(*ts));
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);
	if (ret)
		goto done;

	/* Support calculating ktime upwards or downwards from the reference */
	if (frame_time < timesync_svc->ktime_data.frame_time) {
		add = false;
		counts = timesync_svc->ktime_data.frame_time - frame_time;
	} else {
		add = true;
		counts = frame_time - timesync_svc->ktime_data.frame_time;
	}

	/* Enforce the .23 of a microsecond boundary @ 19.2MHz */
	if (counts > gb_timesync_max_ktime_diff) {
		ret = -EINVAL;
		goto done;
	}

	/* Determine the time difference in femtoseconds */
	delta_fs = counts * gb_timesync_fs_per_clock;

	/* Convert to seconds */
	sec = delta_fs;
	do_div(sec, NSEC_PER_SEC);
	do_div(sec, 1000000UL);

	/* Get the nanosecond remainder */
	nsec = do_div(delta_fs, sec);
	do_div(nsec, 1000000UL);

	if (add) {
		/* Add the calculated offset - overflow nanoseconds upwards */
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
		ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
		if (ts->tv_nsec >= NSEC_PER_SEC) {
			ts->tv_sec++;
			ts->tv_nsec -= NSEC_PER_SEC;
		}
	} else {
		/* Subtract the difference over/underflow as necessary */
		if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
			sec++;
			nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
			nsec = do_div(nsec, NSEC_PER_SEC);
		} else {
			nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
		}

		/* Cannot return a negative second value */
		if (sec > timesync_svc->ktime_data.ts.tv_sec) {
			ret = -EINVAL;
			goto done;
		}
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
		ts->tv_nsec = nsec;
	}
done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	len = buflen - off;

	/* SVC */
	if (len < buflen) {
		gb_timesync_to_timespec(timesync_svc,
					timesync_svc->svc_ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
	}

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		gb_timesync_to_timespec(timesync_svc,
					timesync_svc->timesync_hd->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ",
				dev_name(&hd->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			gb_timesync_to_timespec(timesync_svc,
						timesync_interface->ping_frame_time,
						&ts);
			off += snprintf(&buf[off], len, "%s=%lu.%lu ",
					dev_name(&interface->dev),
					ts.tv_sec, ts.tv_nsec);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake 'ping'.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Have SVC generate a timesync ping */
	timesync_svc->capture_ping = true;
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
	char *buf;

	if (!timesync_svc->print_ping)
		return;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
		dev_dbg(&timesync_svc->svc->dev, "%s", buf);
		kfree(buf);
	}
}
/*
 * Perform the actual work of scheduled TimeSync logic.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc,
			     delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion %d/%d\n",
			timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}
/*
 * Schedule a new TimeSync INIT or PING operation, serialized with respect
 * to gb_timesync_worker().
 */
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
	int ret = 0;

	if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
		return -EINVAL;

	mutex_lock(&timesync_svc->mutex);
	if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
		gb_timesync_set_state_atomic(timesync_svc, state);
	} else {
		ret = -ENODEV;
	}
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
		(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
		 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
		 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}
static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
	struct gb_host_device *hd)
{
	struct gb_timesync_svc *timesync_svc;

	list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
		if (timesync_svc->svc == hd->svc)
			return timesync_svc;
	}
	return NULL;
}
static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
	struct gb_timesync_svc *timesync_svc,
	struct gb_interface *interface)
{
	struct gb_timesync_interface *timesync_interface;

	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		if (timesync_interface->interface == interface)
			return timesync_interface;
	}
	return NULL;
}
int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
	int ret;
	struct gb_timesync_svc *timesync_svc;
	int retries;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
		timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
		if (!timesync_svc) {
			ret = -ENODEV;
			goto done;
		}

		ret = __gb_timesync_schedule_synchronous(timesync_svc,
							 GB_TIMESYNC_STATE_INIT);
		if (!ret)
			break;
	}
	if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
		ret = -ETIMEDOUT;
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
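
/*
 * A minimal usage sketch from a hypothetical caller: a driver that needs
 * FrameTime synchronized before timestamping its traffic could do
 *
 *	int ret = gb_timesync_schedule_synchronous(interface);
 *
 *	if (ret)
 *		dev_err(&interface->dev, "timesync failed: %d\n", ret);
 */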
void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
				     size_t len, loff_t *offset, bool ktime)
{
	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
	char *buf;
	ssize_t ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	mutex_lock(&timesync_svc->mutex);
	if (list_empty(&timesync_svc->interface_list))
		ret = -ENODEV;
	timesync_svc->print_ping = false;
	mutex_unlock(&timesync_svc->mutex);
	if (ret)
		goto done;

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_PING);
	if (ret)
		goto done;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto done;
	}

	if (ktime)
		ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
	else
		ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
	kfree(buf);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}
static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};

static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};
static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
			      struct gb_host_device *hd)
{
	struct gb_timesync_host_device *timesync_hd;

	timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
	if (!timesync_hd)
		return -ENOMEM;

	WARN_ON(timesync_svc->timesync_hd);
	timesync_hd->hd = hd;
	timesync_svc->timesync_hd = timesync_hd;

	return 0;
}
static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
				  struct gb_host_device *hd)
{
	if (timesync_svc->timesync_hd->hd == hd) {
		kfree(timesync_svc->timesync_hd);
		timesync_svc->timesync_hd = NULL;
	}
}
int gb_timesync_svc_add(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	int ret;

	timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
	if (!timesync_svc)
		return -ENOMEM;

	timesync_svc->work_queue =
		create_singlethread_workqueue("gb-timesync-work_queue");

	if (!timesync_svc->work_queue) {
		kfree(timesync_svc);
		return -ENOMEM;
	}

	mutex_lock(&gb_timesync_svc_list_mutex);
	INIT_LIST_HEAD(&timesync_svc->interface_list);
	INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
	mutex_init(&timesync_svc->mutex);
	spin_lock_init(&timesync_svc->spinlock);
	init_waitqueue_head(&timesync_svc->wait_queue);

	timesync_svc->svc = svc;
	timesync_svc->frame_time_offset = 0;
	timesync_svc->capture_ping = false;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

	timesync_svc->frame_time_dentry =
		debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_time_ops);
	timesync_svc->frame_ktime_dentry =
		debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_ktime_ops);

	list_add(&timesync_svc->list, &gb_timesync_svc_list);
	ret = gb_timesync_hd_add(timesync_svc, svc->hd);
	if (ret) {
		list_del(&timesync_svc->list);
		debugfs_remove(timesync_svc->frame_ktime_dentry);
		debugfs_remove(timesync_svc->frame_time_dentry);
		destroy_workqueue(timesync_svc->work_queue);
		kfree(timesync_svc);
		goto done;
	}

	init_timer(&timesync_svc->ktime_timer);
	timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
	timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
	timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
	add_timer(&timesync_svc->ktime_timer);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	cancel_delayed_work_sync(&timesync_svc->delayed_work);

	mutex_lock(&timesync_svc->mutex);

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	del_timer_sync(&timesync_svc->ktime_timer);
	gb_timesync_teardown(timesync_svc);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);

	mutex_unlock(&timesync_svc->mutex);

	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
/*
 * Add a Greybus Interface to the set of TimeSync Interfaces.
 */
int gb_timesync_interface_add(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	int ret = 0;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
	if (!timesync_interface) {
		ret = -ENOMEM;
		goto done;
	}

	mutex_lock(&timesync_svc->mutex);
	timesync_interface->interface = interface;
	list_add(&timesync_interface->list, &timesync_svc->interface_list);
	timesync_svc->strobe_mask |= 1 << interface->interface_id;
	mutex_unlock(&timesync_svc->mutex);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
/*
 * Remove a Greybus Interface from the set of TimeSync Interfaces.
 */
void gb_timesync_interface_remove(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
								 interface);
	if (!timesync_interface)
		goto done;

	mutex_lock(&timesync_svc->mutex);
	timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
	list_del(&timesync_interface->list);
	kfree(timesync_interface);
	mutex_unlock(&timesync_svc->mutex);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
/*
 * Give the authoritative FrameTime to the calling function. Returns zero if we
 * are not in GB_TIMESYNC_STATE_ACTIVE.
 */
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 ret = 0;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
		ret = __gb_timesync_get_frame_time(timesync_svc);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	return ret;
}

u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
	struct gb_timesync_svc *timesync_svc =
		(struct gb_timesync_svc *)data;
	unsigned long flags;
	u64 frame_time;
	struct timespec ts;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
		goto done;

	ktime_get_ts(&ts);
	frame_time = __gb_timesync_get_frame_time(timesync_svc);
	gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mod_timer(&timesync_svc->ktime_timer,
		  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
				   struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}
	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
					 u64 frame_time, struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}
	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
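
/*
 * Sketch of a hypothetical caller converting a captured FrameTime to a
 * timespec, assuming TimeSync is in the ACTIVE state:
 *
 *	struct timespec ts;
 *	u64 frame_time = gb_timesync_get_frame_time_by_interface(interface);
 *
 *	if (!gb_timesync_to_timespec_by_interface(interface, frame_time, &ts))
 *		dev_dbg(&interface->dev, "%lu.%09lu\n", ts.tv_sec, ts.tv_nsec);
 */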
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	ktime_get_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		goto done_nolog;
	}

	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
int __init gb_timesync_init(void)
{
	int ret;

	ret = gb_timesync_platform_init();
	if (ret) {
		pr_err("timesync platform init fail!\n");
		return ret;
	}

	gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

	/* Calculate nanoseconds and femtoseconds per clock */
	gb_timesync_fs_per_clock = FSEC_PER_SEC;
	do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
	gb_timesync_ns_per_clock = NSEC_PER_SEC;
	do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
	/* e.g. at 19.2 MHz this yields 52083333 fs and ~52 ns per clock */

	/* Calculate the maximum number of clocks we will convert to ktime */
	gb_timesync_max_ktime_diff =
		GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

	pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
		gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
	return 0;
}
void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}