1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
/* Clock sources selectable for the Time-Aware Shaper engine.
 * Only SJA1105_TAS_CLKSRC_PTP is used by this driver, so that the
 * schedule runs in the (rate-corrected) PTP time domain.
 */
#define SJA1105_TAS_CLKSRC_DISABLED	0
#define SJA1105_TAS_CLKSRC_STANDALONE	1
#define SJA1105_TAS_CLKSRC_AS6802	2
#define SJA1105_TAS_CLKSRC_PTP		3

/* Gate mask covering all traffic classes (one bit per TC) */
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)

/* Recover the containing structures from embedded members */
#define work_to_sja1105_tas(d) \
	container_of((d), struct sja1105_tas_data, tas_work)
#define tas_to_sja1105(d) \
	container_of((d), struct sja1105_private, tas_data)
17 static int sja1105_tas_set_runtime_params(struct sja1105_private
*priv
)
19 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
20 struct sja1105_gating_config
*gating_cfg
= &tas_data
->gating_cfg
;
21 struct dsa_switch
*ds
= priv
->ds
;
22 s64 earliest_base_time
= S64_MAX
;
23 s64 latest_base_time
= 0;
24 s64 its_cycle_time
= 0;
25 s64 max_cycle_time
= 0;
28 tas_data
->enabled
= false;
30 for (port
= 0; port
< SJA1105_NUM_PORTS
; port
++) {
31 const struct tc_taprio_qopt_offload
*offload
;
33 offload
= tas_data
->offload
[port
];
37 tas_data
->enabled
= true;
39 if (max_cycle_time
< offload
->cycle_time
)
40 max_cycle_time
= offload
->cycle_time
;
41 if (latest_base_time
< offload
->base_time
)
42 latest_base_time
= offload
->base_time
;
43 if (earliest_base_time
> offload
->base_time
) {
44 earliest_base_time
= offload
->base_time
;
45 its_cycle_time
= offload
->cycle_time
;
49 if (!list_empty(&gating_cfg
->entries
)) {
50 tas_data
->enabled
= true;
52 if (max_cycle_time
< gating_cfg
->cycle_time
)
53 max_cycle_time
= gating_cfg
->cycle_time
;
54 if (latest_base_time
< gating_cfg
->base_time
)
55 latest_base_time
= gating_cfg
->base_time
;
56 if (earliest_base_time
> gating_cfg
->base_time
) {
57 earliest_base_time
= gating_cfg
->base_time
;
58 its_cycle_time
= gating_cfg
->cycle_time
;
62 if (!tas_data
->enabled
)
65 /* Roll the earliest base time over until it is in a comparable
66 * time base with the latest, then compare their deltas.
67 * We want to enforce that all ports' base times are within
68 * SJA1105_TAS_MAX_DELTA 200ns cycles of one another.
70 earliest_base_time
= future_base_time(earliest_base_time
,
73 while (earliest_base_time
> latest_base_time
)
74 earliest_base_time
-= its_cycle_time
;
75 if (latest_base_time
- earliest_base_time
>
76 sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA
)) {
78 "Base times too far apart: min %llu max %llu\n",
79 earliest_base_time
, latest_base_time
);
83 tas_data
->earliest_base_time
= earliest_base_time
;
84 tas_data
->max_cycle_time
= max_cycle_time
;
86 dev_dbg(ds
->dev
, "earliest base time %lld ns\n", earliest_base_time
);
87 dev_dbg(ds
->dev
, "latest base time %lld ns\n", latest_base_time
);
88 dev_dbg(ds
->dev
, "longest cycle time %lld ns\n", max_cycle_time
);
93 /* Lo and behold: the egress scheduler from hell.
95 * At the hardware level, the Time-Aware Shaper holds a global linear array of
96 * all schedule entries for all ports. These are the Gate Control List (GCL)
97 * entries, let's call them "timeslots" for short. This linear array of
98 * timeslots is held in BLK_IDX_SCHEDULE.
100 * Then there are a maximum of 8 "execution threads" inside the switch, which
101 * iterate cyclically through the "schedule". Each "cycle" has an entry point
102 * and an exit point, both being timeslot indices in the schedule table. The
103 * hardware calls each cycle a "subschedule".
105 * Subschedule (cycle) i starts when
106 * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
108 * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
109 * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
110 * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
112 * For each schedule entry (timeslot) k, the engine executes the gate control
113 * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
116 * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
119 * +-----------------+
121 * BLK_IDX_SCHEDULE_ENTRY_POINTS v
126 * +----------------+ | | +-------------------------------------+
127 * | .subschindx | | .subschindx |
128 * | | +---------------+ |
129 * | .address | .address | |
132 * | BLK_IDX_SCHEDULE v v |
133 * | +-------+-------+-------+-------+-------+------+ |
134 * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
135 * | +-------+-------+-------+-------+-------+------+ |
138 * | +-------------------------+ | | | |
139 * | | +-------------------------------+ | | |
140 * | | | +-------------------+ | |
142 * | +---------------------------------------------------------------+ |
143 * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
144 * | +---------------------------------------------------------------+ |
145 * | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
147 * +--------+ +-------------------------------------------+
149 * In the above picture there are two subschedules (cycles):
151 * - cycle 0: iterates the schedule table from 0 to 2 (and back)
152 * - cycle 1: iterates the schedule table from 3 to 5 (and back)
154 * All other possible execution threads must be marked as unused by making
155 * their "subschedule end index" (subscheind) equal to the last valid
156 * subschedule's end index (in this case 5).
158 int sja1105_init_scheduling(struct sja1105_private
*priv
)
160 struct sja1105_schedule_entry_points_entry
*schedule_entry_points
;
161 struct sja1105_schedule_entry_points_params_entry
162 *schedule_entry_points_params
;
163 struct sja1105_schedule_params_entry
*schedule_params
;
164 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
165 struct sja1105_gating_config
*gating_cfg
= &tas_data
->gating_cfg
;
166 struct sja1105_schedule_entry
*schedule
;
167 struct sja1105_table
*table
;
168 int schedule_start_idx
;
169 s64 entry_point_delta
;
170 int schedule_end_idx
;
177 rc
= sja1105_tas_set_runtime_params(priv
);
181 /* Discard previous Schedule Table */
182 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE
];
183 if (table
->entry_count
) {
184 kfree(table
->entries
);
185 table
->entry_count
= 0;
188 /* Discard previous Schedule Entry Points Parameters Table */
189 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
];
190 if (table
->entry_count
) {
191 kfree(table
->entries
);
192 table
->entry_count
= 0;
195 /* Discard previous Schedule Parameters Table */
196 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_PARAMS
];
197 if (table
->entry_count
) {
198 kfree(table
->entries
);
199 table
->entry_count
= 0;
202 /* Discard previous Schedule Entry Points Table */
203 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_ENTRY_POINTS
];
204 if (table
->entry_count
) {
205 kfree(table
->entries
);
206 table
->entry_count
= 0;
209 /* Figure out the dimensioning of the problem */
210 for (port
= 0; port
< SJA1105_NUM_PORTS
; port
++) {
211 if (tas_data
->offload
[port
]) {
212 num_entries
+= tas_data
->offload
[port
]->num_entries
;
217 if (!list_empty(&gating_cfg
->entries
)) {
218 num_entries
+= gating_cfg
->num_entries
;
226 /* Pre-allocate space in the static config tables */
229 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE
];
230 table
->entries
= kcalloc(num_entries
, table
->ops
->unpacked_entry_size
,
234 table
->entry_count
= num_entries
;
235 schedule
= table
->entries
;
237 /* Schedule Points Parameters Table */
238 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
];
239 table
->entries
= kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT
,
240 table
->ops
->unpacked_entry_size
, GFP_KERNEL
);
242 /* Previously allocated memory will be freed automatically in
243 * sja1105_static_config_free. This is true for all early
247 table
->entry_count
= SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT
;
248 schedule_entry_points_params
= table
->entries
;
250 /* Schedule Parameters Table */
251 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_PARAMS
];
252 table
->entries
= kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT
,
253 table
->ops
->unpacked_entry_size
, GFP_KERNEL
);
256 table
->entry_count
= SJA1105_MAX_SCHEDULE_PARAMS_COUNT
;
257 schedule_params
= table
->entries
;
259 /* Schedule Entry Points Table */
260 table
= &priv
->static_config
.tables
[BLK_IDX_SCHEDULE_ENTRY_POINTS
];
261 table
->entries
= kcalloc(num_cycles
, table
->ops
->unpacked_entry_size
,
265 table
->entry_count
= num_cycles
;
266 schedule_entry_points
= table
->entries
;
268 /* Finally start populating the static config tables */
269 schedule_entry_points_params
->clksrc
= SJA1105_TAS_CLKSRC_PTP
;
270 schedule_entry_points_params
->actsubsch
= num_cycles
- 1;
272 for (port
= 0; port
< SJA1105_NUM_PORTS
; port
++) {
273 const struct tc_taprio_qopt_offload
*offload
;
274 /* Relative base time */
277 offload
= tas_data
->offload
[port
];
281 schedule_start_idx
= k
;
282 schedule_end_idx
= k
+ offload
->num_entries
- 1;
283 /* This is the base time expressed as a number of TAS ticks
284 * relative to PTPSCHTM, which we'll (perhaps improperly) call
285 * the operational base time.
287 rbt
= future_base_time(offload
->base_time
,
289 tas_data
->earliest_base_time
);
290 rbt
-= tas_data
->earliest_base_time
;
291 /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
292 * delta cannot be zero, which is shitty. Advance all relative
293 * base times by 1 TAS delta, so that even the earliest base
294 * time becomes 1 in relative terms. Then start the operational
295 * base time (PTPSCHTM) one TAS delta earlier than planned.
297 entry_point_delta
= ns_to_sja1105_delta(rbt
) + 1;
299 schedule_entry_points
[cycle
].subschindx
= cycle
;
300 schedule_entry_points
[cycle
].delta
= entry_point_delta
;
301 schedule_entry_points
[cycle
].address
= schedule_start_idx
;
303 /* The subschedule end indices need to be
304 * monotonically increasing.
306 for (i
= cycle
; i
< 8; i
++)
307 schedule_params
->subscheind
[i
] = schedule_end_idx
;
309 for (i
= 0; i
< offload
->num_entries
; i
++, k
++) {
310 s64 delta_ns
= offload
->entries
[i
].interval
;
312 schedule
[k
].delta
= ns_to_sja1105_delta(delta_ns
);
313 schedule
[k
].destports
= BIT(port
);
314 schedule
[k
].resmedia_en
= true;
315 schedule
[k
].resmedia
= SJA1105_GATE_MASK
&
316 ~offload
->entries
[i
].gate_mask
;
321 if (!list_empty(&gating_cfg
->entries
)) {
322 struct sja1105_gate_entry
*e
;
324 /* Relative base time */
327 schedule_start_idx
= k
;
328 schedule_end_idx
= k
+ gating_cfg
->num_entries
- 1;
329 rbt
= future_base_time(gating_cfg
->base_time
,
330 gating_cfg
->cycle_time
,
331 tas_data
->earliest_base_time
);
332 rbt
-= tas_data
->earliest_base_time
;
333 entry_point_delta
= ns_to_sja1105_delta(rbt
) + 1;
335 schedule_entry_points
[cycle
].subschindx
= cycle
;
336 schedule_entry_points
[cycle
].delta
= entry_point_delta
;
337 schedule_entry_points
[cycle
].address
= schedule_start_idx
;
339 for (i
= cycle
; i
< 8; i
++)
340 schedule_params
->subscheind
[i
] = schedule_end_idx
;
342 list_for_each_entry(e
, &gating_cfg
->entries
, list
) {
343 schedule
[k
].delta
= ns_to_sja1105_delta(e
->interval
);
344 schedule
[k
].destports
= e
->rule
->vl
.destports
;
345 schedule
[k
].setvalid
= true;
346 schedule
[k
].txen
= true;
347 schedule
[k
].vlindex
= e
->rule
->vl
.sharindx
;
348 schedule
[k
].winstindex
= e
->rule
->vl
.sharindx
;
349 if (e
->gate_state
) /* Gate open */
350 schedule
[k
].winst
= true;
351 else /* Gate closed */
352 schedule
[k
].winend
= true;
360 /* Be there 2 port subschedules, each executing an arbitrary number of gate
361 * open/close events cyclically.
362 * None of those gate events must ever occur at the exact same time, otherwise
363 * the switch is known to act in exotically strange ways.
364 * However the hardware doesn't bother performing these integrity checks.
365 * So here we are with the task of validating whether the new @admin offload
366 * has any conflict with the already established TAS configuration in
367 * tas_data->offload. We already know the other ports are in harmony with one
368 * another, otherwise we wouldn't have saved them.
369 * Each gate event executes periodically, with a period of @cycle_time and a
370 * phase given by its cycle's @base_time plus its offset within the cycle
371 * (which in turn is given by the length of the events prior to it).
372 * There are two aspects to possible collisions:
373 * - Collisions within one cycle's (actually the longest cycle's) time frame.
374 * For that, we need to compare the cartesian product of each possible
375 * occurrence of each event within one cycle time.
376 * - Collisions in the future. Events may not collide within one cycle time,
377 * but if two port schedules don't have the same periodicity (aka the cycle
378 * times aren't multiples of one another), they surely will some time in the
379 * future (actually they will collide an infinite amount of times).
382 sja1105_tas_check_conflicts(struct sja1105_private
*priv
, int port
,
383 const struct tc_taprio_qopt_offload
*admin
)
385 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
386 const struct tc_taprio_qopt_offload
*offload
;
387 s64 max_cycle_time
, min_cycle_time
;
395 offload
= tas_data
->offload
[port
];
399 /* Check if the two cycle times are multiples of one another.
400 * If they aren't, then they will surely collide.
402 max_cycle_time
= max(offload
->cycle_time
, admin
->cycle_time
);
403 min_cycle_time
= min(offload
->cycle_time
, admin
->cycle_time
);
404 div_s64_rem(max_cycle_time
, min_cycle_time
, &rem
);
408 /* Calculate the "reduced" base time of each of the two cycles
409 * (transposed back as close to 0 as possible) by dividing to
412 div_s64_rem(offload
->base_time
, offload
->cycle_time
, &rem
);
415 div_s64_rem(admin
->base_time
, admin
->cycle_time
, &rem
);
418 stop_time
= max_cycle_time
+ max(rbt1
, rbt2
);
420 /* delta1 is the relative base time of each GCL entry within
421 * the established ports' TAS config.
423 for (i
= 0, delta1
= 0;
424 i
< offload
->num_entries
;
425 delta1
+= offload
->entries
[i
].interval
, i
++) {
426 /* delta2 is the relative base time of each GCL entry
427 * within the newly added TAS config.
429 for (j
= 0, delta2
= 0;
430 j
< admin
->num_entries
;
431 delta2
+= admin
->entries
[j
].interval
, j
++) {
432 /* t1 follows all possible occurrences of the
433 * established ports' GCL entry i within the
436 for (t1
= rbt1
+ delta1
;
438 t1
+= offload
->cycle_time
) {
439 /* t2 follows all possible occurrences
440 * of the newly added GCL entry j
441 * within the first cycle time.
443 for (t2
= rbt2
+ delta2
;
445 t2
+= admin
->cycle_time
) {
447 dev_warn(priv
->ds
->dev
,
448 "GCL entry %d collides with entry %d of port %d\n",
460 /* Check the tc-taprio configuration on @port for conflicts with the tc-gate
461 * global subschedule. If @port is -1, check it against all ports.
462 * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
463 * convert the gating configuration to a dummy tc-taprio offload structure.
465 bool sja1105_gating_check_conflicts(struct sja1105_private
*priv
, int port
,
466 struct netlink_ext_ack
*extack
)
468 struct sja1105_gating_config
*gating_cfg
= &priv
->tas_data
.gating_cfg
;
469 size_t num_entries
= gating_cfg
->num_entries
;
470 struct tc_taprio_qopt_offload
*dummy
;
471 struct sja1105_gate_entry
*e
;
475 if (list_empty(&gating_cfg
->entries
))
478 dummy
= kzalloc(struct_size(dummy
, entries
, num_entries
), GFP_KERNEL
);
480 NL_SET_ERR_MSG_MOD(extack
, "Failed to allocate memory");
484 dummy
->num_entries
= num_entries
;
485 dummy
->base_time
= gating_cfg
->base_time
;
486 dummy
->cycle_time
= gating_cfg
->cycle_time
;
488 list_for_each_entry(e
, &gating_cfg
->entries
, list
)
489 dummy
->entries
[i
++].interval
= e
->interval
;
492 conflict
= sja1105_tas_check_conflicts(priv
, port
, dummy
);
494 for (port
= 0; port
< SJA1105_NUM_PORTS
; port
++) {
495 conflict
= sja1105_tas_check_conflicts(priv
, port
,
507 int sja1105_setup_tc_taprio(struct dsa_switch
*ds
, int port
,
508 struct tc_taprio_qopt_offload
*admin
)
510 struct sja1105_private
*priv
= ds
->priv
;
511 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
512 int other_port
, rc
, i
;
514 /* Can't change an already configured port (must delete qdisc first).
515 * Can't delete the qdisc from an unconfigured port.
517 if (!!tas_data
->offload
[port
] == admin
->enable
)
520 if (!admin
->enable
) {
521 taprio_offload_free(tas_data
->offload
[port
]);
522 tas_data
->offload
[port
] = NULL
;
524 rc
= sja1105_init_scheduling(priv
);
528 return sja1105_static_config_reload(priv
, SJA1105_SCHEDULING
);
531 /* The cycle time extension is the amount of time the last cycle from
532 * the old OPER needs to be extended in order to phase-align with the
533 * base time of the ADMIN when that becomes the new OPER.
534 * But of course our switch needs to be reset to switch-over between
535 * the ADMIN and the OPER configs - so much for a seamless transition.
536 * So don't add insult over injury and just say we don't support cycle
539 if (admin
->cycle_time_extension
)
542 for (i
= 0; i
< admin
->num_entries
; i
++) {
543 s64 delta_ns
= admin
->entries
[i
].interval
;
544 s64 delta_cycles
= ns_to_sja1105_delta(delta_ns
);
545 bool too_long
, too_short
;
547 too_long
= (delta_cycles
>= SJA1105_TAS_MAX_DELTA
);
548 too_short
= (delta_cycles
== 0);
549 if (too_long
|| too_short
) {
550 dev_err(priv
->ds
->dev
,
551 "Interval %llu too %s for GCL entry %d\n",
552 delta_ns
, too_long
? "long" : "short", i
);
557 for (other_port
= 0; other_port
< SJA1105_NUM_PORTS
; other_port
++) {
558 if (other_port
== port
)
561 if (sja1105_tas_check_conflicts(priv
, other_port
, admin
))
565 if (sja1105_gating_check_conflicts(priv
, port
, NULL
)) {
566 dev_err(ds
->dev
, "Conflict with tc-gate schedule\n");
570 tas_data
->offload
[port
] = taprio_offload_get(admin
);
572 rc
= sja1105_init_scheduling(priv
);
576 return sja1105_static_config_reload(priv
, SJA1105_SCHEDULING
);
579 static int sja1105_tas_check_running(struct sja1105_private
*priv
)
581 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
582 struct dsa_switch
*ds
= priv
->ds
;
583 struct sja1105_ptp_cmd cmd
= {0};
586 rc
= sja1105_ptp_commit(ds
, &cmd
, SPI_READ
);
590 if (cmd
.ptpstrtsch
== 1)
591 /* Schedule successfully started */
592 tas_data
->state
= SJA1105_TAS_STATE_RUNNING
;
593 else if (cmd
.ptpstopsch
== 1)
594 /* Schedule is stopped */
595 tas_data
->state
= SJA1105_TAS_STATE_DISABLED
;
597 /* Schedule is probably not configured with PTP clock source */
603 /* Write to PTPCLKCORP */
604 static int sja1105_tas_adjust_drift(struct sja1105_private
*priv
,
607 const struct sja1105_regs
*regs
= priv
->info
->regs
;
608 u32 ptpclkcorp
= ns_to_sja1105_ticks(correction
);
610 return sja1105_xfer_u32(priv
, SPI_WRITE
, regs
->ptpclkcorp
,
614 /* Write to PTPSCHTM */
615 static int sja1105_tas_set_base_time(struct sja1105_private
*priv
,
618 const struct sja1105_regs
*regs
= priv
->info
->regs
;
619 u64 ptpschtm
= ns_to_sja1105_ticks(base_time
);
621 return sja1105_xfer_u64(priv
, SPI_WRITE
, regs
->ptpschtm
,
625 static int sja1105_tas_start(struct sja1105_private
*priv
)
627 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
628 struct sja1105_ptp_cmd
*cmd
= &priv
->ptp_data
.cmd
;
629 struct dsa_switch
*ds
= priv
->ds
;
632 dev_dbg(ds
->dev
, "Starting the TAS\n");
634 if (tas_data
->state
== SJA1105_TAS_STATE_ENABLED_NOT_RUNNING
||
635 tas_data
->state
== SJA1105_TAS_STATE_RUNNING
) {
636 dev_err(ds
->dev
, "TAS already started\n");
643 rc
= sja1105_ptp_commit(ds
, cmd
, SPI_WRITE
);
647 tas_data
->state
= SJA1105_TAS_STATE_ENABLED_NOT_RUNNING
;
652 static int sja1105_tas_stop(struct sja1105_private
*priv
)
654 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
655 struct sja1105_ptp_cmd
*cmd
= &priv
->ptp_data
.cmd
;
656 struct dsa_switch
*ds
= priv
->ds
;
659 dev_dbg(ds
->dev
, "Stopping the TAS\n");
661 if (tas_data
->state
== SJA1105_TAS_STATE_DISABLED
) {
662 dev_err(ds
->dev
, "TAS already disabled\n");
669 rc
= sja1105_ptp_commit(ds
, cmd
, SPI_WRITE
);
673 tas_data
->state
= SJA1105_TAS_STATE_DISABLED
;
678 /* The schedule engine and the PTP clock are driven by the same oscillator, and
679 * they run in parallel. But whilst the PTP clock can keep an absolute
680 * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
681 * up a delta, which is 200ns), and wrapping around at the end of each cycle.
682 * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
684 * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
685 * a software servo, and the schedule engine clock runs in parallel to the PTP
686 * clock, there is logic internal to the switch that periodically keeps the
687 * schedule engine from drifting away. The frequency with which this internal
688 * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
689 * a value also in the PTP clock domain, and is also rate-corrected.
690 * To be precise, during a correction period, there is logic to determine by
691 * how many scheduler clock ticks has the PTP clock drifted. At the end of each
692 * correction period/beginning of new one, the length of a delta is shrunk or
693 * expanded with an integer number of ticks, compared with the typical 25.
694 * So a delta lasts for 200ns (or 25 ticks) only on average.
695 * Sometimes it is longer, sometimes it is shorter. The internal syntonization
696 * logic can adjust for at most 5 ticks each 20 ticks.
698 * The first implication is that you should choose your schedule correction
699 * period to be an integer multiple of the schedule length. Preferably one.
700 * In case there are schedules of multiple ports active, then the correction
701 * period needs to be a multiple of them all. Given the restriction that the
702 * cycle times have to be multiples of one another anyway, this means the
703 * correction period can simply be the largest cycle time, hence the current
704 * choice. This way, the updates are always synchronous to the transmission
705 * cycle, and therefore predictable.
707 * The second implication is that at the beginning of a correction period, the
708 * first few deltas will be modulated in time, until the schedule engine is
709 * properly phase-aligned with the PTP clock. For this reason, you should place
710 * your best-effort traffic at the beginning of a cycle, and your
711 * time-triggered traffic afterwards.
713 * The third implication is that once the schedule engine is started, it can
714 * only adjust for so much drift within a correction period. In the servo you
715 * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
716 * want to do the latter, you need to stop and restart the schedule engine,
717 * which is what the state machine handles.
719 static void sja1105_tas_state_machine(struct work_struct
*work
)
721 struct sja1105_tas_data
*tas_data
= work_to_sja1105_tas(work
);
722 struct sja1105_private
*priv
= tas_to_sja1105(tas_data
);
723 struct sja1105_ptp_data
*ptp_data
= &priv
->ptp_data
;
724 struct timespec64 base_time_ts
, now_ts
;
725 struct dsa_switch
*ds
= priv
->ds
;
726 struct timespec64 diff
;
730 mutex_lock(&ptp_data
->lock
);
732 switch (tas_data
->state
) {
733 case SJA1105_TAS_STATE_DISABLED
:
734 /* Can't do anything at all if clock is still being stepped */
735 if (tas_data
->last_op
!= SJA1105_PTP_ADJUSTFREQ
)
738 rc
= sja1105_tas_adjust_drift(priv
, tas_data
->max_cycle_time
);
742 rc
= __sja1105_ptp_gettimex(ds
, &now
, NULL
);
746 /* Plan to start the earliest schedule first. The others
747 * will be started in hardware, by way of their respective
748 * entry points delta.
749 * Try our best to avoid fringe cases (race condition between
750 * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
751 * least one second in the future from now. This is not ideal,
752 * but this only needs to buy us time until the
753 * sja1105_tas_start command below gets executed.
755 base_time
= future_base_time(tas_data
->earliest_base_time
,
756 tas_data
->max_cycle_time
,
757 now
+ 1ull * NSEC_PER_SEC
);
758 base_time
-= sja1105_delta_to_ns(1);
760 rc
= sja1105_tas_set_base_time(priv
, base_time
);
764 tas_data
->oper_base_time
= base_time
;
766 rc
= sja1105_tas_start(priv
);
770 base_time_ts
= ns_to_timespec64(base_time
);
771 now_ts
= ns_to_timespec64(now
);
773 dev_dbg(ds
->dev
, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
774 base_time_ts
.tv_sec
, base_time_ts
.tv_nsec
,
775 now_ts
.tv_sec
, now_ts
.tv_nsec
);
779 case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING
:
780 if (tas_data
->last_op
!= SJA1105_PTP_ADJUSTFREQ
) {
781 /* Clock was stepped.. bad news for TAS */
782 sja1105_tas_stop(priv
);
786 /* Check if TAS has actually started, by comparing the
787 * scheduled start time with the SJA1105 PTP clock
789 rc
= __sja1105_ptp_gettimex(ds
, &now
, NULL
);
793 if (now
< tas_data
->oper_base_time
) {
794 /* TAS has not started yet */
795 diff
= ns_to_timespec64(tas_data
->oper_base_time
- now
);
796 dev_dbg(ds
->dev
, "time to start: [%lld.%09ld]",
797 diff
.tv_sec
, diff
.tv_nsec
);
801 /* Time elapsed, what happened? */
802 rc
= sja1105_tas_check_running(priv
);
806 if (tas_data
->state
!= SJA1105_TAS_STATE_RUNNING
)
807 /* TAS has started */
809 "TAS not started despite time elapsed\n");
813 case SJA1105_TAS_STATE_RUNNING
:
814 /* Clock was stepped.. bad news for TAS */
815 if (tas_data
->last_op
!= SJA1105_PTP_ADJUSTFREQ
) {
816 sja1105_tas_stop(priv
);
820 rc
= sja1105_tas_check_running(priv
);
824 if (tas_data
->state
!= SJA1105_TAS_STATE_RUNNING
)
825 dev_err(ds
->dev
, "TAS surprisingly stopped\n");
831 dev_err(ds
->dev
, "TAS in an invalid state (incorrect use of API)!\n");
834 if (rc
&& net_ratelimit())
835 dev_err(ds
->dev
, "An operation returned %d\n", rc
);
837 mutex_unlock(&ptp_data
->lock
);
840 void sja1105_tas_clockstep(struct dsa_switch
*ds
)
842 struct sja1105_private
*priv
= ds
->priv
;
843 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
845 if (!tas_data
->enabled
)
848 tas_data
->last_op
= SJA1105_PTP_CLOCKSTEP
;
849 schedule_work(&tas_data
->tas_work
);
852 void sja1105_tas_adjfreq(struct dsa_switch
*ds
)
854 struct sja1105_private
*priv
= ds
->priv
;
855 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
857 if (!tas_data
->enabled
)
860 /* No reason to schedule the workqueue, nothing changed */
861 if (tas_data
->state
== SJA1105_TAS_STATE_RUNNING
)
864 tas_data
->last_op
= SJA1105_PTP_ADJUSTFREQ
;
865 schedule_work(&tas_data
->tas_work
);
868 void sja1105_tas_setup(struct dsa_switch
*ds
)
870 struct sja1105_private
*priv
= ds
->priv
;
871 struct sja1105_tas_data
*tas_data
= &priv
->tas_data
;
873 INIT_WORK(&tas_data
->tas_work
, sja1105_tas_state_machine
);
874 tas_data
->state
= SJA1105_TAS_STATE_DISABLED
;
875 tas_data
->last_op
= SJA1105_PTP_NONE
;
877 INIT_LIST_HEAD(&tas_data
->gating_cfg
.entries
);
880 void sja1105_tas_teardown(struct dsa_switch
*ds
)
882 struct sja1105_private
*priv
= ds
->priv
;
883 struct tc_taprio_qopt_offload
*offload
;
886 cancel_work_sync(&priv
->tas_data
.tas_work
);
888 for (port
= 0; port
< SJA1105_NUM_PORTS
; port
++) {
889 offload
= priv
->tas_data
.offload
[port
];
893 taprio_offload_free(offload
);