Revert "unicode: Don't special case ignorable code points"
[linux.git] / drivers / net / dsa / sja1105 / sja1105_vl.c
blobb7e95d60a6e4534500d507504f59c4d8aa9b51d1
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
 */
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105_vl.h"

#define SJA1105_SIZE_VL_STATUS		8

/* Insert into the global gate list, sorted by gate action time. */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
                                     struct sja1105_rule *rule,
                                     u8 gate_state, s64 entry_time,
                                     struct netlink_ext_ack *extack)
{
        struct sja1105_gate_entry *e;
        int rc;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->rule = rule;
        e->gate_state = gate_state;
        e->interval = entry_time;

        if (list_empty(&gating_cfg->entries)) {
                list_add(&e->list, &gating_cfg->entries);
        } else {
                struct sja1105_gate_entry *p;

                list_for_each_entry(p, &gating_cfg->entries, list) {
                        if (p->interval == e->interval) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Gate conflict");
                                rc = -EBUSY;
                                goto err;
                        }

                        if (e->interval < p->interval)
                                break;
                }
                list_add(&e->list, p->list.prev);
        }

        gating_cfg->num_entries++;

        return 0;
err:
        kfree(e);
        return rc;
}

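/* Worked example (illustrative numbers, not from the datasheet): with entries
 * already queued at absolute times 0 and 5, inserting one at time 3 lands it
 * between them and keeps the list sorted; inserting a second entry at time 5
 * is rejected with -EBUSY ("Gate conflict"), since two gate actions cannot
 * share the same instant within one cycle.
 */
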
/* The gate entries contain absolute times in their e->interval field. Convert
 * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
 */
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
                                    u64 cycle_time)
{
        struct sja1105_gate_entry *last_e;
        struct sja1105_gate_entry *e;
        struct list_head *prev;

        list_for_each_entry(e, &gating_cfg->entries, list) {
                struct sja1105_gate_entry *p;

                prev = e->list.prev;

                if (prev == &gating_cfg->entries)
                        continue;

                p = list_entry(prev, struct sja1105_gate_entry, list);
                p->interval = e->interval - p->interval;
        }
        last_e = list_last_entry(&gating_cfg->entries,
                                 struct sja1105_gate_entry, list);
        last_e->interval = cycle_time - last_e->interval;
}

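/* Worked example (illustrative): with cycle_time = 20 and entries at absolute
 * times 0, 5, 10 and 15, each predecessor's interval becomes the distance to
 * its successor (5, 5, 5), and the last entry wraps around the cycle:
 * 20 - 15 = 5. That is the "0, 5, 10, 15" -> "5, 5, 5, 5" conversion
 * described in the comment above.
 */
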
static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
        struct sja1105_gate_entry *e, *n;

        list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
                list_del(&e->list);
                kfree(e);
        }
}

static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
                                              struct netlink_ext_ack *extack)
{
        struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
        struct sja1105_rule *rule;
        s64 max_cycle_time = 0;
        s64 its_base_time = 0;
        int i, rc = 0;

        sja1105_free_gating_config(gating_cfg);

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                if (rule->type != SJA1105_RULE_VL)
                        continue;
                if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                        continue;

                if (max_cycle_time < rule->vl.cycle_time) {
                        max_cycle_time = rule->vl.cycle_time;
                        its_base_time = rule->vl.base_time;
                }
        }

        if (!max_cycle_time)
                return 0;

        dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
                max_cycle_time, its_base_time);

        gating_cfg->base_time = its_base_time;
        gating_cfg->cycle_time = max_cycle_time;
        gating_cfg->num_entries = 0;

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                s64 time;
                s64 rbt;

                if (rule->type != SJA1105_RULE_VL)
                        continue;
                if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                        continue;

                /* Calculate the difference between this gating schedule's
                 * base time, and the base time of the gating schedule with the
                 * longest cycle time. We call it the relative base time (rbt).
                 */
                rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
                                       its_base_time);
                rbt -= its_base_time;

                time = rbt;

                for (i = 0; i < rule->vl.num_entries; i++) {
                        u8 gate_state = rule->vl.entries[i].gate_state;
                        s64 entry_time = time;

                        while (entry_time < max_cycle_time) {
                                rc = sja1105_insert_gate_entry(gating_cfg, rule,
                                                               gate_state,
                                                               entry_time,
                                                               extack);
                                if (rc)
                                        goto err;

                                entry_time += rule->vl.cycle_time;
                        }
                        time += rule->vl.entries[i].interval;
                }
        }

        sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);

        return 0;
err:
        sja1105_free_gating_config(gating_cfg);
        return rc;
}

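/* Worked example (illustrative, hypothetical numbers): given two
 * time-triggered rules, A with cycle_time 200000 ns and B with cycle_time
 * 100000 ns, A has the longest cycle, so the composed subschedule uses A's
 * base time and a 200000 ns cycle. B's entries are shifted by their relative
 * base time (rbt) and then inserted twice, at rbt and at rbt + 100000,
 * because the "while (entry_time < max_cycle_time)" loop repeats shorter
 * schedules until they fill the longest cycle.
 */
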
/* The switch flow classification core implements TTEthernet, which 'thinks' in
 * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
 * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
 * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
 * (Per-Stream Filtering and Policing), which is what the driver is going to be
 * implementing.
 *
 *                                 VL Lookup
 *  Key = {DMAC && VLANID         +---------+   Key = { (DMAC[47:16] & VLMASK ==
 *         && VLAN PCP            |         |             VLMARKER)
 *         && INGRESS PORT}       +---------+           (both fixed)
 *      (exact match,                  |         && DMAC[15:0] == VLID
 *   all specified in rule)            |                (specified in rule)
 *                                     v         && INGRESS PORT }
 *                               ------------
 *                   0 (PSFP)   /            \   1 (ARINC664)
 *                +-----------/  VLLUPFORMAT  \----------+
 *                |           \    (fixed)    /          |
 *                |            \             /           |
 *  0 (forwarding) v            ------------             |
 *      ------------                                      |
 *     /            \   1 (QoS classification)            |
 *  +---/  ISCRITICAL  \-----------+                       |
 *  |   \  (per rule)  /           |                       |
 *  |    \            /     VLID taken from        VLID taken from
 *  v     ------------      index of rule          contents of rule
 * select                    that matched             that matched
 * DESTPORTS                       |                       |
 *  |                              +---------+--------+
 *  |                                        |
 *  |                                        v
 *  |                                  VL Forwarding
 *  |                                (indexed by VLID)
 *  |                                   +---------+
 *  |                    +--------------|         |
 *  |                    |  select TYPE +---------+
 *  |                    v
 *  |       0 (rate     ------------    1 (time
 *  |       constrained)/            \  triggered)
 *  |          +------/     TYPE      \------------+
 *  |          |      \  (per VLID)  /             |
 *  |          v       \            /              v
 *  |     VL Policing    ------------         VL Policing
 *  |  (indexed by VLID)                   (indexed by VLID)
 *  |     +---------+                         +---------+
 *  |     | TYPE=0  |                         | TYPE=1  |
 *  |     +---------+                         +---------+
 *  |   select SHARINDX                    select SHARINDX to
 *  |     to rate-limit                 re-enter VL Forwarding
 *  |    groups of VL's                with new VLID for egress
 *  |     to same quota                           |
 *  |          |                                  |
 *  | select MAXLEN -> exceed => drop    select MAXLEN -> exceed => drop
 *  |          |                                  |
 *  |          v                                  v
 *  |    VL Forwarding                      VL Forwarding
 *  | (indexed by SHARINDX)              (indexed by SHARINDX)
 *  |     +---------+                         +---------+
 *  |     | TYPE=0  |                         | TYPE=1  |
 *  |     +---------+                         +---------+
 *  |   select PRIORITY,                   select PRIORITY,
 *  | PARTITION, DESTPORTS               PARTITION, DESTPORTS
 *  |          |                                  |
 *  |          v                                  v
 *  |     VL Policing                        VL Policing
 *  | (indexed by SHARINDX)              (indexed by SHARINDX)
 *  |     +---------+                         +---------+
 *  |     | TYPE=0  |                         | TYPE=1  |
 *  |     +---------+                         +---------+
 *  |          |                                  |
 *  |          v                                  |
 *  | select BAG, -> exceed => drop               |
 *  |     JITTER                                  v
 *  |          |     ----------------------------------------------
 *  |          |    / Reception Window is open for this VL          \
 *  |          |   / (the Schedule Table executes an entry i         \
 *  |          |  / M <= i < N, for which these conditions hold):     \  no
 *  |          | +----/                                                \-+
 *  |          | |yes  \  WINST[M] == 1 && WINSTINDEX[M] == VLID       /  |
 *  |          | |      \ WINEND[N] == 1 && WINSTINDEX[N] == VLID     /   |
 *  |          | |       \                                           /    |
 *  |          | |        \ (the VL window has opened and not yet closed)/ |
 *  |          | |         ----------------------------------------------  |
 *  |          | v                                                         v
 *  |          | dispatch to DESTPORTS when the Schedule Table           drop
 *  |          | executes an entry i with TXEN == 1 && VLINDEX == i
 *  v          v
 * dispatch immediately to DESTPORTS
 *
 * The per-port classification key is always composed of {DMAC, VID, PCP} and
 * is non-maskable. This 'looks like' the NULL stream identification function
 * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
 * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
 * ID and PCP, and then the port-based defaults will be used.
 *
 * In TTEthernet, routing is something that needs to be done manually for each
 * Virtual Link. So the flow action must always include one of:
 * a. 'redirect', 'trap' or 'drop': select the egress port list
 * Additionally, the following actions may be applied on a Virtual Link,
 * turning it into 'critical' traffic:
 * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
 *    given by the maximum frame length, bandwidth allocation gap (BAG) and
 *    maximum jitter.
 * c. 'gate': turn it into a time-triggered VL, which can only be received
 *    and forwarded according to a given schedule.
 */

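/* Illustrative userspace mapping (assumed/abbreviated tc syntax; see
 * tc-gate(8) and Documentation/networking/dsa/sja1105.rst for authoritative
 * examples). A non-critical VL on a VLAN-unaware port, keyed on DMAC only,
 * with action a. (redirect):
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress flower skip_sw dst_mac 42:be:24:9b:76:20 \
 *           action mirred egress redirect dev swp3
 *
 * The same key with an additional gate action (action c.) turns the VL into a
 * time-triggered one, with intervals in multiples of 200 ns:
 *
 *   tc filter add dev swp2 ingress flower skip_sw dst_mac 42:be:24:9b:76:20 \
 *           action gate base-time 0 sched-entry open 60000 -1 -1 \
 *           sched-entry close 40000 -1 -1 action trap
 *
 * The flower key populates struct sja1105_key, the redirect/trap/drop action
 * selects DESTPORTS, and the sched-entries become the rule->vl.entries
 * validated in sja1105_vl_gate().
 */
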
static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
                                 struct sja1105_vl_lookup_entry *b)
{
        if (a->macaddr < b->macaddr)
                return true;
        if (a->macaddr > b->macaddr)
                return false;
        if (a->vlanid < b->vlanid)
                return true;
        if (a->vlanid > b->vlanid)
                return false;
        if (a->port < b->port)
                return true;
        if (a->port > b->port)
                return false;
        if (a->vlanprior < b->vlanprior)
                return true;
        if (a->vlanprior > b->vlanprior)
                return false;
        /* Keys are equal */
        return false;
}

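/* Illustrative ordering (hypothetical values): a key with MACADDR
 * 0x0180c2000001 sorts before one with MACADDR 0x0180c2000002 no matter what
 * their VLANID, PORT or VLANPRIOR are, because the comparison is
 * lexicographic in the order MACADDR, VLANID, PORT, VLANPRIOR. This is the
 * same sort order that the VL Lookup table itself must be loaded in (see the
 * UM10944.pdf quote in sja1105_init_virtual_links() below).
 */
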
/* FIXME: this should change when the bridge upper of the port changes. */
static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
{
        unsigned long bridge_num;

        if (!dp->bridge)
                return dsa_tag_8021q_standalone_vid(dp);

        bridge_num = dsa_port_bridge_num_get(dp);

        return dsa_tag_8021q_bridge_vid(bridge_num);
}

static int sja1105_init_virtual_links(struct sja1105_private *priv,
                                      struct netlink_ext_ack *extack)
{
        struct sja1105_vl_policing_entry *vl_policing;
        struct sja1105_vl_forwarding_entry *vl_fwd;
        struct sja1105_vl_lookup_entry *vl_lookup;
        bool have_critical_virtual_links = false;
        struct sja1105_table *table;
        struct sja1105_rule *rule;
        int num_virtual_links = 0;
        int max_sharindx = 0;
        int i, j, k;

        /* Figure out the dimensioning of the problem */
        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                if (rule->type != SJA1105_RULE_VL)
                        continue;
                /* Each VL lookup entry matches on a single ingress port */
                num_virtual_links += hweight_long(rule->port_mask);

                if (rule->vl.type != SJA1105_VL_NONCRITICAL)
                        have_critical_virtual_links = true;
                if (max_sharindx < rule->vl.sharindx)
                        max_sharindx = rule->vl.sharindx;
        }

        if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
                NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
                return -ENOSPC;
        }

        if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
                NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
                return -ENOSPC;
        }

        max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;

        /* Discard previous VL Lookup Table */
        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Policing Table */
        table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Forwarding Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Discard previous VL Forwarding Parameters Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
        }

        /* Nothing to do */
        if (!num_virtual_links)
                return 0;

        /* Pre-allocate space in the static config tables */

        /* VL Lookup Table */
        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        table->entries = kcalloc(num_virtual_links,
                                 table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = num_virtual_links;
        vl_lookup = table->entries;

        k = 0;

        list_for_each_entry(rule, &priv->flow_block.rules, list) {
                unsigned long port;

                if (rule->type != SJA1105_RULE_VL)
                        continue;

                for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
                        vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
                        vl_lookup[k].port = port;
                        vl_lookup[k].macaddr = rule->key.vl.dmac;
                        if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
                                vl_lookup[k].vlanid = rule->key.vl.vid;
                                vl_lookup[k].vlanprior = rule->key.vl.pcp;
                        } else {
                                /* FIXME */
                                struct dsa_port *dp = dsa_to_port(priv->ds, port);
                                u16 vid = sja1105_port_get_tag_8021q_vid(dp);

                                vl_lookup[k].vlanid = vid;
                                vl_lookup[k].vlanprior = 0;
                        }
                        /* For critical VLs, the DESTPORTS mask is taken from
                         * the VL Forwarding Table, so no point in putting it
                         * in the VL Lookup Table
                         */
                        if (rule->vl.type == SJA1105_VL_NONCRITICAL)
                                vl_lookup[k].destports = rule->vl.destports;
                        else
                                vl_lookup[k].iscritical = true;
                        vl_lookup[k].flow_cookie = rule->cookie;
                        k++;
                }
        }

        /* UM10944.pdf chapter 4.2.3 VL Lookup table:
         * "the entries in the VL Lookup table must be sorted in ascending
         * order (i.e. the smallest value must be loaded first) according to
         * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
         */
        for (i = 0; i < num_virtual_links; i++) {
                struct sja1105_vl_lookup_entry *a = &vl_lookup[i];

                for (j = i + 1; j < num_virtual_links; j++) {
                        struct sja1105_vl_lookup_entry *b = &vl_lookup[j];

                        if (sja1105_vl_key_lower(b, a)) {
                                struct sja1105_vl_lookup_entry tmp = *a;

                                *a = *b;
                                *b = tmp;
                        }
                }
        }

        if (!have_critical_virtual_links)
                return 0;

        /* VL Policing Table */
        table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
        table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = max_sharindx;
        vl_policing = table->entries;

        /* VL Forwarding Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
        table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = max_sharindx;
        vl_fwd = table->entries;

        /* VL Forwarding Parameters Table */
        table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
        table->entries = kcalloc(1, table->ops->unpacked_entry_size,
                                 GFP_KERNEL);
        if (!table->entries)
                return -ENOMEM;
        table->entry_count = 1;

        for (i = 0; i < num_virtual_links; i++) {
                unsigned long cookie = vl_lookup[i].flow_cookie;
                struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);

                if (rule->vl.type == SJA1105_VL_NONCRITICAL)
                        continue;
                if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
                        int sharindx = rule->vl.sharindx;

                        vl_policing[i].type = 1;
                        vl_policing[i].sharindx = sharindx;
                        vl_policing[i].maxlen = rule->vl.maxlen;
                        vl_policing[sharindx].type = 1;

                        vl_fwd[i].type = 1;
                        vl_fwd[sharindx].type = 1;
                        vl_fwd[sharindx].priority = rule->vl.ipv;
                        vl_fwd[sharindx].partition = 0;
                        vl_fwd[sharindx].destports = rule->vl.destports;
                }
        }

        sja1105_frame_memory_partitioning(priv);

        return 0;
}

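/* Illustrative walk-through (hypothetical indices): a time-triggered rule
 * whose VL Lookup entry ended up at index 2 after sorting, and whose
 * user-provided sharindx is 5, programs vl_policing[2] with TYPE=1 and
 * SHARINDX=5, while the egress parameters (PRIORITY, PARTITION, DESTPORTS)
 * are written to vl_fwd[5]. This is the "re-enter VL Forwarding with new VLID
 * for egress" step from the diagram above.
 */
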
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
                        struct netlink_ext_ack *extack, unsigned long cookie,
                        struct sja1105_key *key, unsigned long destports,
                        bool append)
{
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct dsa_port *dp = dsa_to_port(priv->ds, port);
        bool vlan_aware = dsa_port_is_vlan_filtering(dp);
        int rc;

        if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on DMAC");
                return -EOPNOTSUPP;
        } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
        }

        if (!rule) {
                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
                if (!rule)
                        return -ENOMEM;

                rule->cookie = cookie;
                rule->type = SJA1105_RULE_VL;
                rule->key = *key;
                list_add(&rule->list, &priv->flow_block.rules);
        }

        rule->port_mask |= BIT(port);
        if (append)
                rule->vl.destports |= destports;
        else
                rule->vl.destports = destports;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc) {
                rule->port_mask &= ~BIT(port);
                if (!rule->port_mask) {
                        list_del(&rule->list);
                        kfree(rule);
                }
        }

        return rc;
}

int sja1105_vl_delete(struct sja1105_private *priv, int port,
                      struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
        int rc;

        rule->port_mask &= ~BIT(port);
        if (!rule->port_mask) {
                list_del(&rule->list);
                kfree(rule);
        }

        rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                return rc;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc)
                return rc;

        rc = sja1105_init_scheduling(priv);
        if (rc < 0)
                return rc;

        return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}

int sja1105_vl_gate(struct sja1105_private *priv, int port,
                    struct netlink_ext_ack *extack, unsigned long cookie,
                    struct sja1105_key *key, u32 index, s32 prio,
                    u64 base_time, u64 cycle_time, u64 cycle_time_ext,
                    u32 num_entries, struct action_gate_entry *entries)
{
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct dsa_port *dp = dsa_to_port(priv->ds, port);
        bool vlan_aware = dsa_port_is_vlan_filtering(dp);
        int ipv = -1;
        int i, rc;
        s32 rem;

        if (cycle_time_ext) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Cycle time extension not supported");
                return -EOPNOTSUPP;
        }

        div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
        if (rem) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Base time must be multiple of 200 ns");
                return -ERANGE;
        }

        div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
        if (rem) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Cycle time must be multiple of 200 ns");
                return -ERANGE;
        }

        if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on DMAC");
                return -EOPNOTSUPP;
        } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
        }

        if (!rule) {
                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
                if (!rule)
                        return -ENOMEM;

                list_add(&rule->list, &priv->flow_block.rules);
                rule->cookie = cookie;
                rule->type = SJA1105_RULE_VL;
                rule->key = *key;
                rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
                rule->vl.sharindx = index;
                rule->vl.base_time = base_time;
                rule->vl.cycle_time = cycle_time;
                rule->vl.num_entries = num_entries;
                rule->vl.entries = kcalloc(num_entries,
                                           sizeof(struct action_gate_entry),
                                           GFP_KERNEL);
                if (!rule->vl.entries) {
                        rc = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < num_entries; i++) {
                        div_s64_rem(entries[i].interval,
                                    sja1105_delta_to_ns(1), &rem);
                        if (rem) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Interval must be multiple of 200 ns");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (!entries[i].interval) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Interval cannot be zero");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (ns_to_sja1105_delta(entries[i].interval) >
                            SJA1105_TAS_MAX_DELTA) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Maximum interval is 52 ms");
                                rc = -ERANGE;
                                goto out;
                        }

                        if (entries[i].maxoctets != -1) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Cannot offload IntervalOctetMax");
                                rc = -EOPNOTSUPP;
                                goto out;
                        }

                        if (ipv == -1) {
                                ipv = entries[i].ipv;
                        } else if (ipv != entries[i].ipv) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Only support a single IPV per VL");
                                rc = -EOPNOTSUPP;
                                goto out;
                        }

                        rule->vl.entries[i] = entries[i];
                }

                if (ipv == -1) {
                        if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
                                ipv = key->vl.pcp;
                        else
                                ipv = 0;
                }

                /* TODO: support per-flow MTU */
                rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
                rule->vl.ipv = ipv;
        }

        rule->port_mask |= BIT(port);

        rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                goto out;

        rc = sja1105_init_virtual_links(priv, extack);
        if (rc)
                goto out;

        if (sja1105_gating_check_conflicts(priv, -1, extack)) {
                NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
                rc = -ERANGE;
                goto out;
        }

out:
        if (rc) {
                rule->port_mask &= ~BIT(port);
                if (!rule->port_mask) {
                        list_del(&rule->list);
                        kfree(rule->vl.entries);
                        kfree(rule);
                }
        }

        return rc;
}

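/* Illustrative parameter set (hypothetical numbers) that passes the checks
 * above: base_time 0, cycle_time 1000000 ns, cycle_time_ext 0, and two
 * entries of 600000 ns (gate open) and 400000 ns (gate closed). All values
 * are multiples of the 200 ns step returned by sja1105_delta_to_ns(1), each
 * interval is non-zero and below the 52 ms SJA1105_TAS_MAX_DELTA limit,
 * maxoctets is left at -1, and a single IPV is used across all entries.
 */
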
static int sja1105_find_vlid(struct sja1105_private *priv, int port,
                             struct sja1105_key *key)
{
        struct sja1105_vl_lookup_entry *vl_lookup;
        struct sja1105_table *table;
        int i;

        if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
                    key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
                return -1;

        table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
        vl_lookup = table->entries;

        for (i = 0; i < table->entry_count; i++) {
                if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
                        if (vl_lookup[i].port == port &&
                            vl_lookup[i].macaddr == key->vl.dmac &&
                            vl_lookup[i].vlanid == key->vl.vid &&
                            vl_lookup[i].vlanprior == key->vl.pcp)
                                return i;
                } else {
                        if (vl_lookup[i].port == port &&
                            vl_lookup[i].macaddr == key->vl.dmac)
                                return i;
                }
        }

        return -1;
}

int sja1105_vl_stats(struct sja1105_private *priv, int port,
                     struct sja1105_rule *rule, struct flow_stats *stats,
                     struct netlink_ext_ack *extack)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
        u64 unreleased;
        u64 timingerr;
        u64 lengtherr;
        int vlid, rc;
        u64 pkts;

        if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
                return 0;

        vlid = sja1105_find_vlid(priv, port, &rule->key);
        if (vlid < 0)
                return 0;

        rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
                              SJA1105_SIZE_VL_STATUS);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
                return rc;
        }

        sja1105_unpack(buf, &timingerr,  31, 16, SJA1105_SIZE_VL_STATUS);
        sja1105_unpack(buf, &unreleased, 15,  0, SJA1105_SIZE_VL_STATUS);
        sja1105_unpack(buf, &lengtherr,  47, 32, SJA1105_SIZE_VL_STATUS);

        pkts = timingerr + unreleased + lengtherr;

        flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
                          jiffies - rule->vl.stats.lastused,
                          FLOW_ACTION_HW_STATS_IMMEDIATE);

        rule->vl.stats.pkts = pkts;
        rule->vl.stats.lastused = jiffies;

        return 0;
}
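
/* Note on the counter layout unpacked above (illustrative summary): the
 * 8-byte VL status word read over SPI carries UNRELEASED in bits 15:0,
 * TIMINGERR in bits 31:16 and LENGTHERR in bits 47:32. The three counters
 * are summed and the delta since the previous readout is reported to the
 * flower core through flow_stats_update().
 */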