// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/jiffies.h>
#include <linux/limits.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timer.h>

#include "fbnic.h"
#include "fbnic_csr.h"
#include "fbnic_netdev.h"

/* FBNIC timing & PTP implementation
 * Datapath uses truncated 40b timestamps for scheduling and event reporting.
 * We need to promote those to full 64b, hence we periodically cache the top
 * 32b of the HW time counter. Since this makes our time reporting non-atomic
 * we leave the HW clock free running and adjust time offsets in SW as needed.
 * Time offset is 64b - we need a seq counter for 32b machines.
 * Time offset and the cache of top bits are independent so we don't need
 * a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() + writer side lock
 * are enough.
 */

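/* For illustration only, a minimal sketch of the 40b -> 64b promotion
 * described above (the driver's real helper is fbnic_ts40_to_ns(); the
 * parameter names and exact bit plumbing here are assumptions for the
 * example). Bits 39:32 of the truncated stamp overlap the low byte of
 * the cached high word, which is how a wrap of the low 40 bits since
 * the last cache refresh can be detected.
 */
static inline u64 example_ts40_to_ns(u32 cached_high, s64 offset, u64 ts40)
{
	u64 time_ns;

	/* Bits 63:40 come from the periodically cached top 32 bits */
	time_ns = (u64)(cached_high >> 8) << 40;

	/* The cache is deliberately held back, so stamps are always
	 * "later"; if bits 39:32 of the stamp are below the low byte
	 * of the cache, the 40b counter wrapped after the refresh.
	 */
	if ((u8)(ts40 >> 32) < (u8)cached_high)
		time_ns += 1ULL << 40;

	return time_ns + ts40 + offset;
}
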
/* Period of refresh of top bits of timestamp, give ourselves an 8x margin.
 * This should translate to once a minute.
 * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
 */
#define FBNIC_TS_HIGH_REFRESH_JIF	nsecs_to_jiffies((1ULL << 40) / 16)

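/* Worked out: 2^40 ns ~= 1099.5 s (~18.3 min) is the wrap period of the
 * truncated stamps; (2^40) / 16 ns ~= 68.7 s, so the top bits are indeed
 * re-cached roughly once a minute, long before a wrap can be missed.
 */
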
static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp)
{
	return container_of(ptp, struct fbnic_dev, ptp_info);
}

/* This function is "slow" because we could try guessing which high part
 * is correct based on low instead of re-reading, and skip reading @hi
 * twice altogether if @lo is far enough from 0.
 */
static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
{
	u32 hi, lo;

	lockdep_assert_held(&fbd->time_lock);

	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	return (u64)hi << 32 | lo;
}

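/* Example of the torn read the loop above catches: with the counter at
 * hi = 5, lo = 0xffffffff, reading hi (5), then racing with a wrap and
 * reading lo (0), would combine to 5 << 32, i.e. ~4.3 s in the past.
 * Re-reading hi (now 6) detects the wrap and retries.
 */
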
static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend)
{
	lockdep_assert_held(&fbd->time_lock);

	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS,
		   FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32));
	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend);
}

static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd)
{
	if (time_is_after_jiffies(fbd->last_read +
				  FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2))
		return;

	dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n",
		 (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ);
}

static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
	unsigned long flags;
	u32 hi;

	spin_lock_irqsave(&fbd->time_lock, flags);
	hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI);
	if (!fbnic_present(fbd))
		goto out; /* Don't bother handling, reset is pending */
	/* Let's keep high cached value a bit lower to avoid race with
	 * incoming timestamps. The logic in fbnic_ts40_to_ns() will
	 * take care of overflow in this case. It will make cached time
	 * ~1 minute lower and incoming timestamp will always be later
	 * than cached time.
	 */
	WRITE_ONCE(fbn->time_high, hi - 16);
	fbd->last_read = jiffies;
out:
	spin_unlock_irqrestore(&fbd->time_lock, flags);
}

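/* Note on the "- 16" above: one unit of the high word is 2^32 ns, so the
 * cache is deliberately held back by 16 * 2^32 ns ~= 68.7 s, about one
 * refresh period, which is where the "~1 minute lower" in the comment
 * comes from.
 */
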
static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;

	fbn = netdev_priv(fbd->netdev);

	fbnic_ptp_fresh_check(fbd);
	fbnic_ptp_refresh_time(fbd, fbn);

	return FBNIC_TS_HIGH_REFRESH_JIF;
}

static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	u64 addend, dclk_period;
	unsigned long flags;

	/* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
	addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);

	spin_lock_irqsave(&fbd->time_lock, flags);
	__fbnic_time_set_addend(fbd, addend);
	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET);

	/* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
	fbnic_rd32(fbd, FBNIC_PTP_SPARE);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return fbnic_present(fbd) ? 0 : -EIO;
}

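/* Worked example for the addend math above, assuming FBNIC_CLOCK_FREQ is
 * the 600 MHz named in the comment: dclk_period = 10^9 * 2^32 / (6 * 10^8)
 * = 0x1AAAAAAAA (integer division truncates), i.e. 1 ns plus 0xAAAAAAAA
 * subns ~= 1.6667 ns added per d_clock tick, which is exactly 1/600 MHz.
 * adjust_by_scaled_ppm() then stretches or shrinks that base period by
 * scaled_ppm, a ppm value carrying a 16-bit fractional part.
 */
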
static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);
	u64_stats_update_begin(&fbn->time_seq);
	WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta);
	u64_stats_update_end(&fbn->time_seq);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return 0;
}

static int
fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
		     struct ptp_system_timestamp *sts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 time_ns;
	u32 hi, lo;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);

	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		ptp_read_system_prets(sts);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
		ptp_read_system_postts(sts);
		/* Similarly to comment above __fbnic_time_get_slow()
		 * - this can be optimized if needed.
		 */
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	time_ns = ((u64)hi << 32 | lo) + fbn->time_offset;
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	if (!fbnic_present(fbd))
		return -EIO;

	*ts = ns_to_timespec64(time_ns);

	return 0;
}

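/* The ptp_read_system_prets()/ptp_read_system_postts() calls above bracket
 * only the LO read on purpose: that is the single register access the
 * timestamp is attributed to, so keeping the bracket tight minimizes the
 * PHC-to-system uncertainty window reported to PTP_SYS_OFFSET_EXTENDED
 * users.
 */
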
static int
fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 dev_ns, host_ns;
	int ret;

	fbn = netdev_priv(fbd->netdev);

	host_ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&fbd->time_lock, flags);

	dev_ns = __fbnic_time_get_slow(fbd);

	if (fbnic_present(fbd)) {
		u64_stats_update_begin(&fbn->time_seq);
		WRITE_ONCE(fbn->time_offset, host_ns - dev_ns);
		u64_stats_update_end(&fbn->time_seq);
		ret = 0;
	} else {
		ret = -EIO;
	}
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return ret;
}

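/* After this, reported time is counter + offset = host_ns as of the moment
 * the counter was sampled. Per the header comment, the HW clock itself is
 * never rewritten; only the SW offset moves.
 */
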
static const struct ptp_clock_info fbnic_ptp_info = {
	.owner			= THIS_MODULE,
	/* 1,000,000,000 - 1 PPB to ensure increment is positive
	 * after max negative adjustment.
	 */
	.max_adj		= 999999999,
	.do_aux_work		= fbnic_ptp_do_aux_work,
	.adjfine		= fbnic_ptp_adjfine,
	.adjtime		= fbnic_ptp_adjtime,
	.gettimex64		= fbnic_ptp_gettimex64,
	.settime64		= fbnic_ptp_settime64,
};

static void fbnic_ptp_reset(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u64 dclk_period;

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	/* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;

	__fbnic_time_set_addend(fbd, dclk_period);

	fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0);
	fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0);

	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT);

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FBNIC_PTP_CTRL_TQS_OUT_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	fbnic_rd32(fbd, FBNIC_PTP_SPARE);

	fbn->time_offset = 0;
	fbn->time_high = 0;
}

void fbnic_time_init(struct fbnic_net *fbn)
{
	/* This is not really a statistic, but the locking primitive fits
	 * our use case perfectly, we need an atomic 8-byte READ_ONCE() /
	 * WRITE_ONCE() behavior.
	 */
	u64_stats_init(&fbn->time_seq);
}

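/* For illustration, the matching reader side (helper name assumed for the
 * example): on 32b machines u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * make the 64b offset read atomic; on 64b machines they compile away and
 * the READ_ONCE() alone suffices.
 */
static inline s64 example_read_time_offset(struct fbnic_net *fbn)
{
	unsigned int seq;
	s64 offset;

	do {
		seq = u64_stats_fetch_begin(&fbn->time_seq);
		offset = READ_ONCE(fbn->time_offset);
	} while (u64_stats_fetch_retry(&fbn->time_seq, seq));

	return offset;
}
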
int fbnic_time_start(struct fbnic_net *fbn)
{
	fbnic_ptp_refresh_time(fbn->fbd, fbn);
	/* Assume that fbnic_ptp_do_aux_work() will never be called if not
	 * scheduled here
	 */
	return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
}

void fbnic_time_stop(struct fbnic_net *fbn)
{
	ptp_cancel_worker_sync(fbn->fbd->ptp);
	fbnic_ptp_fresh_check(fbn->fbd);
}

int fbnic_ptp_setup(struct fbnic_dev *fbd)
{
	struct device *dev = fbd->dev;
	unsigned long flags;

	spin_lock_init(&fbd->time_lock);

	spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */
	fbnic_ptp_reset(fbd);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info));

	fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev);
	if (IS_ERR(fbd->ptp))
		dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp);

	return PTR_ERR_OR_ZERO(fbd->ptp);
}

void fbnic_ptp_destroy(struct fbnic_dev *fbd)
{
	if (!fbd->ptp)
		return;
	ptp_clock_unregister(fbd->ptp);
}