// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include "aq_macsec.h"
#include "aq_nic.h"
#include <linux/rtnetlink.h>

#include "macsec/macsec_api.h"
#define AQ_MACSEC_KEY_LEN_128_BIT 16
#define AQ_MACSEC_KEY_LEN_192_BIT 24
#define AQ_MACSEC_KEY_LEN_256_BIT 32

enum aq_clear_type {
	/* update HW configuration */
	AQ_CLEAR_HW = BIT(0),
	/* update SW configuration (busy bits, pointers) */
	AQ_CLEAR_SW = BIT(1),
	/* update both HW and SW configuration */
	AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW,
};
static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
			 enum aq_clear_type clear_type);
static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
			 const int sa_num, enum aq_clear_type clear_type);
static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
			 enum aq_clear_type clear_type);
static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
			 const int sa_num, enum aq_clear_type clear_type);
static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
			 enum aq_clear_type clear_type);
static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
			     const struct macsec_secy *secy);
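
/* Helper note: the MSS lookup tables take a MAC address as two 32-bit words.
 * aq_ether_addr_to_mac() below copies the 6-byte Ethernet address into the
 * upper bytes of a u32[2] buffer and byte-swaps each word, i.e. the hardware
 * appears to expect the address in big-endian halves with the low word first.
 */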
static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
{
	u32 tmp[2] = { 0 };

	memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN);

	mac[0] = swab32(tmp[1]);
	mac[1] = swab32(tmp[0]);
}
/* There's a 1:1 mapping between SecY and TX SC */
static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg,
				     const struct macsec_secy *secy)
{
	int i;

	if (unlikely(!secy))
		return -1;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (macsec_cfg->aq_txsc[i].sw_secy == secy)
			return i;
	}

	return -1;
}
static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg,
				     const struct macsec_rx_sc *rxsc)
{
	int i;

	if (unlikely(!rxsc))
		return -1;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (macsec_cfg->aq_rxsc[i].sw_rxsc == rxsc)
			return i;
	}

	return -1;
}
static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa,
					const unsigned int sc_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sc_idx >> 2;
	case aq_macsec_sa_sc_2sa_16sc:
		return sc_idx >> 1;
	case aq_macsec_sa_sc_1sa_32sc:
		return sc_idx;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
	}

	return -1;
}
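
/* Key layout note: the SAK arrives from the MACsec core as a byte string.
 * aq_rotate_keys() below reverses the u32 word order and byte-swaps each
 * word, which appears to be the layout the egress/ingress SA key records
 * expect; e.g. for a 128-bit key, word 0 of the record becomes swab32() of
 * the last word of the original key.
 */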
/* Rotate keys u32[8] */
static void aq_rotate_keys(u32 (*key)[8], const int key_len)
{
	u32 tmp[8];

	memcpy(&tmp, key, sizeof(tmp));
	memset(*key, 0, sizeof(*key));

	if (key_len == AQ_MACSEC_KEY_LEN_128_BIT) {
		(*key)[0] = swab32(tmp[3]);
		(*key)[1] = swab32(tmp[2]);
		(*key)[2] = swab32(tmp[1]);
		(*key)[3] = swab32(tmp[0]);
	} else if (key_len == AQ_MACSEC_KEY_LEN_192_BIT) {
		(*key)[0] = swab32(tmp[5]);
		(*key)[1] = swab32(tmp[4]);
		(*key)[2] = swab32(tmp[3]);
		(*key)[3] = swab32(tmp[2]);
		(*key)[4] = swab32(tmp[1]);
		(*key)[5] = swab32(tmp[0]);
	} else if (key_len == AQ_MACSEC_KEY_LEN_256_BIT) {
		(*key)[0] = swab32(tmp[7]);
		(*key)[1] = swab32(tmp[6]);
		(*key)[2] = swab32(tmp[5]);
		(*key)[3] = swab32(tmp[4]);
		(*key)[4] = swab32(tmp[3]);
		(*key)[5] = swab32(tmp[2]);
		(*key)[6] = swab32(tmp[1]);
		(*key)[7] = swab32(tmp[0]);
	} else {
		pr_warn("Rotate_keys: invalid key_len\n");
	}
}
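
/* The MSS counter API reports each counter as two u32 halves, low word
 * first; the macro below assembles them into a single u64 value.
 */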
#define STATS_2x32_TO_64(stat_field) \
	(((u64)stat_field[1] << 32) | stat_field[0])
static int aq_get_macsec_common_stats(struct aq_hw_s *hw,
				      struct aq_macsec_common_stats *stats)
{
	struct aq_mss_ingress_common_counters ingress_counters;
	struct aq_mss_egress_common_counters egress_counters;
	int ret;

	/* MACSEC counters */
	ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters);
	if (unlikely(ret))
		return ret;

	stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts);
	stats->in.tagged_miss_pkts =
		STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts);
	stats->in.untagged_miss_pkts =
		STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts);
	stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts);
	stats->in.untagged_pkts =
		STATS_2x32_TO_64(ingress_counters.untagged_pkts);
	stats->in.bad_tag_pkts =
		STATS_2x32_TO_64(ingress_counters.bad_tag_pkts);
	stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts);
	stats->in.unknown_sci_pkts =
		STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts);
	stats->in.ctrl_prt_pass_pkts =
		STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts);
	stats->in.unctrl_prt_pass_pkts =
		STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts);
	stats->in.ctrl_prt_fail_pkts =
		STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts);
	stats->in.unctrl_prt_fail_pkts =
		STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts);
	stats->in.too_long_pkts =
		STATS_2x32_TO_64(ingress_counters.too_long_pkts);
	stats->in.igpoc_ctl_pkts =
		STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts);
	stats->in.ecc_error_pkts =
		STATS_2x32_TO_64(ingress_counters.ecc_error_pkts);
	stats->in.unctrl_hit_drop_redir =
		STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir);

	ret = aq_mss_get_egress_common_counters(hw, &egress_counters);
	if (unlikely(ret))
		return ret;

	stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt);
	stats->out.unknown_sa_pkts =
		STATS_2x32_TO_64(egress_counters.unknown_sa_pkts);
	stats->out.untagged_pkts =
		STATS_2x32_TO_64(egress_counters.untagged_pkts);
	stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long);
	stats->out.ecc_error_pkts =
		STATS_2x32_TO_64(egress_counters.ecc_error_pkts);
	stats->out.unctrl_hit_drop_redir =
		STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir);

	return 0;
}
static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx,
			     struct aq_macsec_rx_sa_stats *stats)
{
	struct aq_mss_ingress_sa_counters i_sa_counters;
	int ret;

	ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx);
	if (unlikely(ret))
		return ret;

	stats->untagged_hit_pkts =
		STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts);
	stats->ctrl_hit_drop_redir_pkts =
		STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts);
	stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa);
	stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa);
	stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts);
	stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts);
	stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts);
	stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts);
	stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts);
	stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts);
	stats->validated_octets =
		STATS_2x32_TO_64(i_sa_counters.validated_octets);
	stats->decrypted_octets =
		STATS_2x32_TO_64(i_sa_counters.decrypted_octets);

	return 0;
}
static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx,
			     struct aq_macsec_tx_sa_stats *stats)
{
	struct aq_mss_egress_sa_counters e_sa_counters;
	int ret;

	ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx);
	if (unlikely(ret))
		return ret;

	stats->sa_hit_drop_redirect =
		STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect);
	stats->sa_protected2_pkts =
		STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts);
	stats->sa_protected_pkts =
		STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts);
	stats->sa_encrypted_pkts =
		STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts);

	return 0;
}
static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
{
	struct aq_mss_egress_sa_record sa_rec;
	int ret;

	ret = aq_mss_get_egress_sa_record(hw, &sa_rec, sa_idx);
	if (likely(!ret))
		*pn = sa_rec.next_pn;

	return ret;
}
static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
{
	struct aq_mss_ingress_sa_record sa_rec;
	int ret;

	ret = aq_mss_get_ingress_sa_record(hw, &sa_rec, sa_idx);
	if (likely(!ret))
		*pn = (!sa_rec.sat_nextpn) ? sa_rec.next_pn : 0;

	return ret;
}
static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
			     struct aq_macsec_tx_sc_stats *stats)
{
	struct aq_mss_egress_sc_counters e_sc_counters;
	int ret;

	ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx);
	if (unlikely(ret))
		return ret;

	stats->sc_protected_pkts =
		STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts);
	stats->sc_encrypted_pkts =
		STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts);
	stats->sc_protected_octets =
		STATS_2x32_TO_64(e_sc_counters.sc_protected_octets);
	stats->sc_encrypted_octets =
		STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets);

	return 0;
}
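
/* The aq_mdo_*() callbacks below implement the MACsec offload interface
 * (struct macsec_ops, installed in aq_macsec_init()). They are invoked by
 * the MACsec core from its netlink handlers, so the software shadow state
 * in nic->macsec_cfg is not expected to change concurrently with them.
 */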
static int aq_mdo_dev_open(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int ret = 0;

	if (netif_carrier_ok(nic->ndev))
		ret = aq_apply_secy_cfg(nic, ctx->secy);

	return ret;
}
static int aq_mdo_dev_stop(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int i;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
			aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
				      AQ_CLEAR_HW);
	}

	return 0;
}
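
/* Program one TX SC into hardware: an egress classifier record matching the
 * SecY's source MAC and SCI that forwards to the SC/SA tables, followed by
 * the egress SC record itself. The flag bits written into sc_rec.tci below
 * (C, encryption, SCB, SCI present, end station) mirror the SecTAG TCI
 * semantics of IEEE 802.1AE, in the bit positions the SC record appears to
 * use.
 */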
static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx)
{
	struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	struct aq_mss_egress_class_record tx_class_rec = { 0 };
	const struct macsec_secy *secy = aq_txsc->sw_secy;
	struct aq_mss_egress_sc_record sc_rec = { 0 };
	unsigned int sc_idx = aq_txsc->hw_sc_idx;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;

	aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr);

	put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci);
	tx_class_rec.sci_mask = 0;

	tx_class_rec.sa_mask = 0x3f;

	tx_class_rec.action = 0; /* forward to SA/SC table */
	tx_class_rec.valid = 1;

	tx_class_rec.sc_idx = sc_idx;

	tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa;

	ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx);
	if (ret)
		return ret;

	sc_rec.protect = secy->protect_frames;
	if (secy->tx_sc.encrypt)
		sc_rec.tci |= BIT(1);
	if (secy->tx_sc.scb)
		sc_rec.tci |= BIT(2);
	if (secy->tx_sc.send_sci)
		sc_rec.tci |= BIT(3);
	if (secy->tx_sc.end_station)
		sc_rec.tci |= BIT(4);
	/* The C bit is clear if and only if the Secure Data is
	 * exactly the same as the User Data and the ICV is 16 octets long.
	 */
	if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt))
		sc_rec.tci |= BIT(0);

	switch (secy->key_len) {
	case AQ_MACSEC_KEY_LEN_128_BIT:
		sc_rec.sak_len = 0;
		break;
	case AQ_MACSEC_KEY_LEN_192_BIT:
		sc_rec.sak_len = 1;
		break;
	case AQ_MACSEC_KEY_LEN_256_BIT:
		sc_rec.sak_len = 2;
		break;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
		return -EINVAL;
	}

	sc_rec.curr_an = secy->tx_sc.encoding_sa;
	sc_rec.valid = 1;
	sc_rec.fresh = 1;

	return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx);
}
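
/* SC/SA partitioning: the MSS engine appears to provide 32 SA records that
 * can be split as 8 SCs x 4 SAs, 16 SCs x 2 SAs or 32 SCs x 1 SA (enum
 * aq_macsec_sc_sa). aq_sc_idx_max() returns how many SCs fit in the current
 * mode, and aq_to_hw_sc_idx() shifts the logical SC index left so that the
 * low bits of the hardware index can later be OR'ed with the association
 * number to form an SA index (sa_idx = sc_idx | an in aq_update_txsa()).
 */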
static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
{
	u32 result = 0;

	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		result = 8;
		break;
	case aq_macsec_sa_sc_2sa_16sc:
		result = 16;
		break;
	case aq_macsec_sa_sc_1sa_32sc:
		result = 32;
		break;
	default:
		break;
	}

	return result;
}

static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sc_idx << 2;
	case aq_macsec_sa_sc_2sa_16sc:
		return sc_idx << 1;
	case aq_macsec_sa_sc_1sa_32sc:
		return sc_idx;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
	}

	return sc_idx;
}

static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
{
	enum aq_macsec_sc_sa sc_sa = aq_macsec_sa_sc_not_used;

	switch (num_an) {
	case 4:
		sc_sa = aq_macsec_sa_sc_4sa_8sc;
		break;
	case 2:
		sc_sa = aq_macsec_sa_sc_2sa_16sc;
		break;
	case 1:
		sc_sa = aq_macsec_sa_sc_1sa_32sc;
		break;
	default:
		break;
	}

	return sc_sa;
}
static int aq_mdo_add_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	enum aq_macsec_sc_sa sc_sa;
	u32 txsc_idx;
	int ret = 0;

	sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN);
	if (sc_sa == aq_macsec_sa_sc_not_used)
		return -EINVAL;

	if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa))
		return -ENOSPC;

	txsc_idx = ffz(cfg->txsc_idx_busy);
	if (txsc_idx == AQ_MACSEC_MAX_SC)
		return -ENOSPC;

	cfg->sc_sa = sc_sa;
	cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
	cfg->aq_txsc[txsc_idx].sw_secy = secy;

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_set_txsc(nic, txsc_idx);

	set_bit(txsc_idx, &cfg->txsc_idx_busy);

	return ret;
}
static int aq_mdo_upd_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	const struct macsec_secy *secy = ctx->secy;
	int txsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx < 0)
		return -ENOENT;

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_set_txsc(nic, txsc_idx);

	return ret;
}
static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
			 enum aq_clear_type clear_type)
{
	struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	struct aq_mss_egress_class_record tx_class_rec = { 0 };
	struct aq_mss_egress_sc_record sc_rec = { 0 };
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;
	int sa_num;

	for_each_set_bit (sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
		ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type);
		if (ret)
			return ret;
	}

	if (clear_type & AQ_CLEAR_HW) {
		ret = aq_mss_set_egress_class_record(hw, &tx_class_rec,
						     txsc_idx);
		if (ret)
			return ret;

		sc_rec.fresh = 1;
		ret = aq_mss_set_egress_sc_record(hw, &sc_rec,
						  tx_sc->hw_sc_idx);
		if (ret)
			return ret;
	}

	if (clear_type & AQ_CLEAR_SW) {
		clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy);
		nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL;
	}

	return ret;
}
static int aq_mdo_del_secy(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int ret = 0;

	if (!nic->macsec_cfg)
		return 0;

	ret = aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL);

	return ret;
}
static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
			  const struct macsec_secy *secy,
			  const struct macsec_tx_sa *tx_sa,
			  const unsigned char *key, const unsigned char an)
{
	const u32 next_pn = tx_sa->next_pn_halves.lower;
	struct aq_mss_egress_sakey_record key_rec;
	const unsigned int sa_idx = sc_idx | an;
	struct aq_mss_egress_sa_record sa_rec;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;

	memset(&sa_rec, 0, sizeof(sa_rec));
	sa_rec.valid = tx_sa->active;
	sa_rec.fresh = 1;
	sa_rec.next_pn = next_pn;

	ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
	if (ret)
		return ret;

	if (!key)
		return ret;

	memset(&key_rec, 0, sizeof(key_rec));
	memcpy(&key_rec.key, key, secy->key_len);

	aq_rotate_keys(&key_rec.key, secy->key_len);

	ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);

	return ret;
}
static int aq_mdo_add_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	struct aq_macsec_txsc *aq_txsc;
	int txsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
	if (txsc_idx < 0)
		return -EINVAL;

	aq_txsc = &cfg->aq_txsc[txsc_idx];
	set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);

	memcpy(aq_txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
	       secy->key_len);

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
				     ctx->sa.tx_sa, ctx->sa.key,
				     ctx->sa.assoc_num);

	return ret;
}
static int aq_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	struct aq_macsec_txsc *aq_txsc;
	int txsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
	if (txsc_idx < 0)
		return -EINVAL;

	aq_txsc = &cfg->aq_txsc[txsc_idx];
	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
				     ctx->sa.tx_sa, NULL, ctx->sa.assoc_num);

	return ret;
}
static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
			 const int sa_num, enum aq_clear_type clear_type)
{
	const int sa_idx = aq_txsc->hw_sc_idx | sa_num;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;

	if (clear_type & AQ_CLEAR_SW)
		clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy);

	if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
		struct aq_mss_egress_sakey_record key_rec;
		struct aq_mss_egress_sa_record sa_rec;

		memset(&sa_rec, 0, sizeof(sa_rec));
		sa_rec.fresh = 1;

		ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
		if (ret)
			return ret;

		memset(&key_rec, 0, sizeof(key_rec));
		return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
	}

	return ret;
}
static int aq_mdo_del_txsa(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int txsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
	if (txsc_idx < 0)
		return -EINVAL;

	ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
			    AQ_CLEAR_ALL);

	return ret;
}
static int aq_rxsc_validate_frames(const enum macsec_validation_type validate)
{
	switch (validate) {
	case MACSEC_VALIDATE_DISABLED:
		return 2;
	case MACSEC_VALIDATE_CHECK:
		return 1;
	case MACSEC_VALIDATE_STRICT:
		return 0;
	default:
		WARN_ONCE(true, "Invalid validation type");
	}

	return 0;
}
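
/* Program one RX SC: two ingress pre-classification records are written per
 * SC (one matching on the full SCI, one matching by source MAC alone with
 * the SCI taken from the table, for peers that do not transmit the SCI),
 * followed by the ingress SC record carrying the validation mode and
 * replay-protection settings.
 */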
static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
{
	const struct aq_macsec_rxsc *aq_rxsc =
		&nic->macsec_cfg->aq_rxsc[rxsc_idx];
	struct aq_mss_ingress_preclass_record pre_class_record;
	const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc;
	const struct macsec_secy *secy = aq_rxsc->sw_secy;
	const u32 hw_sc_idx = aq_rxsc->hw_sc_idx;
	struct aq_mss_ingress_sc_record sc_record;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;

	memset(&pre_class_record, 0, sizeof(pre_class_record));
	put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci);
	pre_class_record.sci_mask = 0xff;
	/* match all MACSEC ethertype packets */
	pre_class_record.eth_type = ETH_P_MACSEC;
	pre_class_record.eth_type_mask = 0x3;

	aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci);
	pre_class_record.sa_mask = 0x3f;

	pre_class_record.an_mask = nic->macsec_cfg->sc_sa;
	pre_class_record.sc_idx = hw_sc_idx;
	/* strip SecTAG & forward for decryption */
	pre_class_record.action = 0x0;
	pre_class_record.valid = 1;

	ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
						 2 * rxsc_idx + 1);
	if (ret)
		return ret;

	/* If SCI is absent, then match by SA alone */
	pre_class_record.sci_mask = 0;
	pre_class_record.sci_from_table = 1;

	ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
						 2 * rxsc_idx);
	if (ret)
		return ret;

	memset(&sc_record, 0, sizeof(sc_record));
	sc_record.validate_frames =
		aq_rxsc_validate_frames(secy->validate_frames);
	if (secy->replay_protect) {
		sc_record.replay_protect = 1;
		sc_record.anti_replay_window = secy->replay_window;
	}
	sc_record.valid = 1;
	sc_record.fresh = 1;

	ret = aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx);
	if (ret)
		return ret;

	return ret;
}
static int aq_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
	u32 rxsc_idx;
	int ret = 0;

	if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max)
		return -ENOSPC;

	rxsc_idx = ffz(cfg->rxsc_idx_busy);
	if (rxsc_idx >= rxsc_idx_max)
		return -ENOSPC;

	cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
							   cfg->sc_sa);
	cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
	cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc;

	if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
		ret = aq_set_rxsc(nic, rxsc_idx);

	if (ret < 0)
		return ret;

	set_bit(rxsc_idx, &cfg->rxsc_idx_busy);

	return 0;
}
static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int rxsc_idx;
	int ret = 0;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
		ret = aq_set_rxsc(nic, rxsc_idx);

	return ret;
}
static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
			 enum aq_clear_type clear_type)
{
	struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;
	int sa_num;

	for_each_set_bit (sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
		ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type);
		if (ret)
			return ret;
	}

	if (clear_type & AQ_CLEAR_HW) {
		struct aq_mss_ingress_preclass_record pre_class_record;
		struct aq_mss_ingress_sc_record sc_record;

		memset(&pre_class_record, 0, sizeof(pre_class_record));
		memset(&sc_record, 0, sizeof(sc_record));

		ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
							 2 * rxsc_idx);
		if (ret)
			return ret;

		ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
							 2 * rxsc_idx + 1);
		if (ret)
			return ret;

		sc_record.fresh = 1;
		ret = aq_mss_set_ingress_sc_record(hw, &sc_record,
						   rx_sc->hw_sc_idx);
		if (ret)
			return ret;
	}

	if (clear_type & AQ_CLEAR_SW) {
		clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy);
		rx_sc->sw_secy = NULL;
		rx_sc->sw_rxsc = NULL;
	}

	return ret;
}
static int aq_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	enum aq_clear_type clear_type = AQ_CLEAR_SW;
	int rxsc_idx;
	int ret = 0;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	if (netif_carrier_ok(nic->ndev))
		clear_type = AQ_CLEAR_ALL;

	ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);

	return ret;
}
static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
			  const struct macsec_secy *secy,
			  const struct macsec_rx_sa *rx_sa,
			  const unsigned char *key, const unsigned char an)
{
	struct aq_mss_ingress_sakey_record sa_key_record;
	const u32 next_pn = rx_sa->next_pn_halves.lower;
	struct aq_mss_ingress_sa_record sa_record;
	struct aq_hw_s *hw = nic->aq_hw;
	const int sa_idx = sc_idx | an;
	int ret = 0;

	memset(&sa_record, 0, sizeof(sa_record));
	sa_record.valid = rx_sa->active;
	sa_record.fresh = 1;
	sa_record.next_pn = next_pn;

	ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
	if (ret)
		return ret;

	if (!key)
		return ret;

	memset(&sa_key_record, 0, sizeof(sa_key_record));
	memcpy(&sa_key_record.key, key, secy->key_len);

	switch (secy->key_len) {
	case AQ_MACSEC_KEY_LEN_128_BIT:
		sa_key_record.key_len = 0;
		break;
	case AQ_MACSEC_KEY_LEN_192_BIT:
		sa_key_record.key_len = 1;
		break;
	case AQ_MACSEC_KEY_LEN_256_BIT:
		sa_key_record.key_len = 2;
		break;
	default:
		return -1;
	}

	aq_rotate_keys(&sa_key_record.key, secy->key_len);

	ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);

	return ret;
}
static int aq_mdo_add_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	const struct macsec_secy *secy = ctx->secy;
	struct aq_macsec_rxsc *aq_rxsc;
	int rxsc_idx;
	int ret = 0;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
	set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);

	memcpy(aq_rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
	       secy->key_len);

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
				     ctx->sa.rx_sa, ctx->sa.key,
				     ctx->sa.assoc_num);

	return ret;
}
static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	const struct macsec_secy *secy = ctx->secy;
	int rxsc_idx;
	int ret = 0;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
		ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
				     secy, ctx->sa.rx_sa, NULL,
				     ctx->sa.assoc_num);

	return ret;
}
static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
			 const int sa_num, enum aq_clear_type clear_type)
{
	int sa_idx = aq_rxsc->hw_sc_idx | sa_num;
	struct aq_hw_s *hw = nic->aq_hw;
	int ret = 0;

	if (clear_type & AQ_CLEAR_SW)
		clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy);

	if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
		struct aq_mss_ingress_sakey_record sa_key_record;
		struct aq_mss_ingress_sa_record sa_record;

		memset(&sa_key_record, 0, sizeof(sa_key_record));
		memset(&sa_record, 0, sizeof(sa_record));
		sa_record.fresh = 1;
		ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
		if (ret)
			return ret;

		return aq_mss_set_ingress_sakey_record(hw, &sa_key_record,
						       sa_idx);
	}

	return ret;
}
static int aq_mdo_del_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int rxsc_idx;
	int ret = 0;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
			    AQ_CLEAR_ALL);

	return ret;
}
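
/* Statistics callbacks: each aq_mdo_get_*_stats() handler reads the
 * relevant hardware counters on demand, caches them in nic->macsec_cfg and
 * copies them into the MACsec core's stats structures. SC-level RX counters
 * appear not to be kept by hardware, so aq_mdo_get_rx_sc_stats() sums the
 * per-SA counters of every active SA in the SC instead.
 */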
static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
	struct aq_hw_s *hw = nic->aq_hw;

	aq_get_macsec_common_stats(hw, stats);

	ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
	ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts;
	ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long;
	ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts;
	ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts;
	ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts;
	ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts;
	ctx->stats.dev_stats->InPktsOverrun = 0;

	return 0;
}
static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_tx_sc_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	int txsc_idx;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy);
	if (txsc_idx < 0)
		return -ENOENT;

	aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	stats = &aq_txsc->stats;
	aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);

	ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts;
	ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets;

	return 0;
}
static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_tx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	const struct macsec_secy *secy;
	struct aq_macsec_txsc *aq_txsc;
	struct macsec_tx_sa *tx_sa;
	unsigned int sa_idx;
	int txsc_idx;
	u32 next_pn;
	int ret;

	txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
	if (txsc_idx < 0)
		return -EINVAL;

	aq_txsc = &cfg->aq_txsc[txsc_idx];
	sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
	stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
	ret = aq_get_txsa_stats(hw, sa_idx, stats);
	if (ret)
		return ret;

	ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts;

	secy = aq_txsc->sw_secy;
	tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]);
	ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn);
	if (ret == 0) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = next_pn;
		spin_unlock_bh(&tx_sa->lock);
	}

	return ret;
}
static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_rx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_rxsc *aq_rxsc;
	unsigned int sa_idx;
	int rxsc_idx;
	int ret = 0;
	int i;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -ENOENT;

	aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
	for (i = 0; i < MACSEC_NUM_AN; i++) {
		if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
			continue;

		stats = &aq_rxsc->rx_sa_stats[i];
		sa_idx = aq_rxsc->hw_sc_idx | i;
		ret = aq_get_rxsa_stats(hw, sa_idx, stats);
		if (ret)
			break;

		ctx->stats.rx_sc_stats->InOctetsValidated +=
			stats->validated_octets;
		ctx->stats.rx_sc_stats->InOctetsDecrypted +=
			stats->decrypted_octets;
		ctx->stats.rx_sc_stats->InPktsUnchecked +=
			stats->unchecked_pkts;
		ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts;
		ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts;
		ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts;
		ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts;
		ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts;
		ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa;
		ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa;
	}

	return ret;
}
static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_rx_sa_stats *stats;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_rxsc *aq_rxsc;
	struct macsec_rx_sa *rx_sa;
	unsigned int sa_idx;
	int rxsc_idx;
	u32 next_pn;
	int ret;

	rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
	if (rxsc_idx < 0)
		return -EINVAL;

	aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
	stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
	sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
	ret = aq_get_rxsa_stats(hw, sa_idx, stats);
	if (ret)
		return ret;

	ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts;
	ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts;
	ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa;

	rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]);
	ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn);
	if (ret == 0) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = next_pn;
		spin_unlock_bh(&rx_sa->lock);
	}

	return ret;
}
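
/* apply_txsc_cfg()/apply_rxsc_cfg() replay the cached software state of one
 * SC (the SC record plus every SA and key remembered in aq_txsc/aq_rxsc)
 * into hardware. They are used when the configuration could not be written
 * at the time it was requested, e.g. when the link comes up
 * (aq_macsec_enable()) or the SecY's netdev is opened (aq_mdo_dev_open() ->
 * aq_apply_secy_cfg()).
 */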
static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx)
{
	struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
	const struct macsec_secy *secy = aq_txsc->sw_secy;
	struct macsec_tx_sa *tx_sa;
	int ret = 0;
	int i;

	if (!netif_running(secy->netdev))
		return ret;

	ret = aq_set_txsc(nic, txsc_idx);
	if (ret)
		return ret;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		tx_sa = rcu_dereference_bh(secy->tx_sc.sa[i]);
		if (tx_sa) {
			ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
					     tx_sa, aq_txsc->tx_sa_key[i], i);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx)
{
	struct aq_macsec_rxsc *aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
	const struct macsec_secy *secy = aq_rxsc->sw_secy;
	struct macsec_rx_sa *rx_sa;
	int ret = 0;
	int i;

	if (!netif_running(secy->netdev))
		return ret;

	ret = aq_set_rxsc(nic, rxsc_idx);
	if (ret)
		return ret;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[i]);
		if (rx_sa) {
			ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
					     rx_sa, aq_rxsc->rx_sa_key[i], i);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
			 enum aq_clear_type clear_type)
{
	struct macsec_rx_sc *rx_sc;
	int txsc_idx;
	int rxsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx >= 0) {
		ret = aq_clear_txsc(nic, txsc_idx, clear_type);
		if (ret)
			return ret;
	}

	for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc;
	     rx_sc = rcu_dereference_bh(rx_sc->next)) {
		rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
		if (rxsc_idx < 0)
			continue;

		ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
		if (ret)
			return ret;
	}

	return ret;
}
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
			     const struct macsec_secy *secy)
{
	struct macsec_rx_sc *rx_sc;
	int txsc_idx;
	int rxsc_idx;
	int ret = 0;

	txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
	if (txsc_idx >= 0)
		apply_txsc_cfg(nic, txsc_idx);

	for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active;
	     rx_sc = rcu_dereference_bh(rx_sc->next)) {
		rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
		if (unlikely(rxsc_idx < 0))
			continue;

		ret = apply_rxsc_cfg(nic, rxsc_idx);
		if (ret)
			return ret;
	}

	return ret;
}
static int aq_apply_macsec_cfg(struct aq_nic_s *nic)
{
	int ret = 0;
	int i;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) {
			ret = apply_txsc_cfg(nic, i);
			if (ret)
				return ret;
		}
	}

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) {
			ret = apply_rxsc_cfg(nic, i);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sa_idx & 3;
	case aq_macsec_sa_sc_2sa_16sc:
		return sa_idx & 1;
	case aq_macsec_sa_sc_1sa_32sc:
		return 0;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
	}
	return -EINVAL;
}

static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa,
				 const int sa_idx)
{
	switch (sc_sa) {
	case aq_macsec_sa_sc_4sa_8sc:
		return sa_idx & ~3;
	case aq_macsec_sa_sc_2sa_16sc:
		return sa_idx & ~1;
	case aq_macsec_sa_sc_1sa_32sc:
		return sa_idx;
	default:
		WARN_ONCE(true, "Invalid sc_sa");
	}
	return -EINVAL;
}
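
/* PN exhaustion handling: hardware sets a per-SA "expired" bit once an
 * egress packet number crosses the threshold configured in
 * aq_macsec_enable(). The periodic worker below maps each expired SA index
 * back to its TX SC and association number and calls macsec_pn_wrapped() so
 * the MACsec core can mark the SA for rekeying; the expired bits are then
 * written back to acknowledge them.
 */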
static void aq_check_txsa_expiration(struct aq_nic_s *nic)
{
	u32 egress_sa_expired, egress_sa_threshold_expired;
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	const struct macsec_secy *secy;
	int sc_idx = 0, txsc_idx = 0;
	enum aq_macsec_sc_sa sc_sa;
	struct macsec_tx_sa *tx_sa;
	unsigned char an = 0;
	int ret;
	int i;

	sc_sa = cfg->sc_sa;

	ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired);
	if (unlikely(ret))
		return;

	ret = aq_mss_get_egress_sa_threshold_expired(hw,
		&egress_sa_threshold_expired);

	for (i = 0; i < AQ_MACSEC_MAX_SA; i++) {
		if (egress_sa_expired & BIT(i)) {
			an = aq_sa_from_sa_idx(sc_sa, i);
			sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i);
			txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx);
			if (txsc_idx < 0)
				continue;

			aq_txsc = &cfg->aq_txsc[txsc_idx];
			if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) {
				netdev_warn(nic->ndev,
					"PN threshold expired on invalid TX SC");
				continue;
			}

			secy = aq_txsc->sw_secy;
			if (!netif_running(secy->netdev)) {
				netdev_warn(nic->ndev,
					"PN threshold expired on down TX SC");
				continue;
			}

			if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) {
				netdev_warn(nic->ndev,
					"PN threshold expired on invalid TX SA");
				continue;
			}

			tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa);
		}
	}

	aq_mss_set_egress_sa_expired(hw, egress_sa_expired);
	if (likely(!ret))
		aq_mss_set_egress_sa_threshold_expired(hw,
			egress_sa_threshold_expired);
}
const struct macsec_ops aq_macsec_ops = {
	.mdo_dev_open = aq_mdo_dev_open,
	.mdo_dev_stop = aq_mdo_dev_stop,
	.mdo_add_secy = aq_mdo_add_secy,
	.mdo_upd_secy = aq_mdo_upd_secy,
	.mdo_del_secy = aq_mdo_del_secy,
	.mdo_add_rxsc = aq_mdo_add_rxsc,
	.mdo_upd_rxsc = aq_mdo_upd_rxsc,
	.mdo_del_rxsc = aq_mdo_del_rxsc,
	.mdo_add_rxsa = aq_mdo_add_rxsa,
	.mdo_upd_rxsa = aq_mdo_upd_rxsa,
	.mdo_del_rxsa = aq_mdo_del_rxsa,
	.mdo_add_txsa = aq_mdo_add_txsa,
	.mdo_upd_txsa = aq_mdo_upd_txsa,
	.mdo_del_txsa = aq_mdo_del_txsa,
	.mdo_get_dev_stats = aq_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
};
int aq_macsec_init(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg;
	u32 caps_lo;

	if (!nic->aq_fw_ops->get_link_capabilities)
		return 0;

	caps_lo = nic->aq_fw_ops->get_link_capabilities(nic->aq_hw);

	if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
		return 0;

	nic->macsec_cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!nic->macsec_cfg)
		return -ENOMEM;

	nic->ndev->features |= NETIF_F_HW_MACSEC;
	nic->ndev->macsec_ops = &aq_macsec_ops;

	return 0;
}
void aq_macsec_free(struct aq_nic_s *nic)
{
	kfree(nic->macsec_cfg);
	nic->macsec_cfg = NULL;
}
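
/* aq_macsec_enable() runs once the link is up: it asks firmware to switch
 * the MACsec engine on (with PN thresholds and interrupts enabled), programs
 * bypass filters so control-plane Ethertypes (currently only ETH_P_PAE, i.e.
 * 802.1X EAPOL) skip the MACsec engine, and finally replays any
 * configuration accumulated while the link was down.
 */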
int aq_macsec_enable(struct aq_nic_s *nic)
{
	u32 ctl_ether_types[1] = { ETH_P_PAE };
	struct macsec_msg_fw_response resp = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct aq_hw_s *hw = nic->aq_hw;
	int num_ctl_ether_types = 0;
	int index = 0, tbl_idx;
	int ret;

	if (!nic->macsec_cfg)
		return 0;

	rtnl_lock();

	if (nic->aq_fw_ops->send_macsec_req) {
		struct macsec_cfg_request cfg = { 0 };

		cfg.enabled = 1;
		cfg.egress_threshold = 0xffffffff;
		cfg.ingress_threshold = 0xffffffff;
		cfg.interrupts_enabled = 1;

		msg.msg_type = macsec_cfg_msg;
		msg.cfg = cfg;

		ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (ret)
			goto unlock;
	}

	/* Init Ethertype bypass filters */
	for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) {
		struct aq_mss_ingress_prectlf_record rx_prectlf_rec;
		struct aq_mss_egress_ctlf_record tx_ctlf_rec;

		if (ctl_ether_types[index] == 0)
			continue;

		memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec));
		tx_ctlf_rec.eth_type = ctl_ether_types[index];
		tx_ctlf_rec.match_type = 4; /* Match eth_type only */
		tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */
		tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */
		tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1;
		aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx);

		memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec));
		rx_prectlf_rec.eth_type = ctl_ether_types[index];
		rx_prectlf_rec.match_type = 4; /* Match eth_type only */
		rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */
		rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */
		tbl_idx =
			NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1;
		aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx);

		num_ctl_ether_types++;
	}

	ret = aq_apply_macsec_cfg(nic);

unlock:
	rtnl_unlock();
	return ret;
}
void aq_macsec_work(struct aq_nic_s *nic)
{
	if (!nic->macsec_cfg)
		return;

	if (!netif_carrier_ok(nic->ndev))
		return;

	rtnl_lock();
	aq_check_txsa_expiration(nic);
	rtnl_unlock();
}
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int i, cnt = 0;

	if (!cfg)
		return 0;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!test_bit(i, &cfg->rxsc_idx_busy))
			continue;
		cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
	}

	return cnt;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
	if (!nic->macsec_cfg)
		return 0;

	return hweight_long(nic->macsec_cfg->txsc_idx_busy);
}
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	int i, cnt = 0;

	if (!cfg)
		return 0;

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!test_bit(i, &cfg->txsc_idx_busy))
			continue;
		cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
	}

	return cnt;
}
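
/* ethtool statistics path: aq_macsec_update_stats() refreshes every cached
 * counter from hardware, and aq_macsec_get_stats() then flattens the common,
 * per-TX-SC/SA and per-RX-SA counters into the ethtool data array in a fixed
 * order that must match the corresponding ethtool string table.
 */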
static int aq_macsec_update_stats(struct aq_nic_s *nic)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_hw_s *hw = nic->aq_hw;
	struct aq_macsec_txsc *aq_txsc;
	struct aq_macsec_rxsc *aq_rxsc;
	int i, sa_idx, assoc_num;
	int ret = 0;

	aq_get_macsec_common_stats(hw, &cfg->stats);

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!(cfg->txsc_idx_busy & BIT(i)))
			continue;
		aq_txsc = &cfg->aq_txsc[i];

		ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx,
					&aq_txsc->stats);
		if (ret)
			return ret;

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
				continue;
			sa_idx = aq_txsc->hw_sc_idx | assoc_num;
			ret = aq_get_txsa_stats(hw, sa_idx,
					&aq_txsc->tx_sa_stats[assoc_num]);
			if (ret)
				return ret;
		}
	}

	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
		if (!(test_bit(i, &cfg->rxsc_idx_busy)))
			continue;
		aq_rxsc = &cfg->aq_rxsc[i];

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
				continue;
			sa_idx = aq_rxsc->hw_sc_idx | assoc_num;

			ret = aq_get_rxsa_stats(hw, sa_idx,
					&aq_rxsc->rx_sa_stats[assoc_num]);
			if (ret)
				return ret;
		}
	}

	return ret;
}
u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
{
	struct aq_macsec_cfg *cfg = nic->macsec_cfg;
	struct aq_macsec_common_stats *common_stats;
	struct aq_macsec_tx_sc_stats *txsc_stats;
	struct aq_macsec_tx_sa_stats *txsa_stats;
	struct aq_macsec_rx_sa_stats *rxsa_stats;
	struct aq_macsec_txsc *aq_txsc;
	struct aq_macsec_rxsc *aq_rxsc;
	unsigned int assoc_num;
	unsigned int sc_num;
	unsigned int i = 0U;

	if (!cfg)
		return data;

	aq_macsec_update_stats(nic);

	common_stats = &cfg->stats;
	data[i] = common_stats->in.ctl_pkts;
	data[++i] = common_stats->in.tagged_miss_pkts;
	data[++i] = common_stats->in.untagged_miss_pkts;
	data[++i] = common_stats->in.notag_pkts;
	data[++i] = common_stats->in.untagged_pkts;
	data[++i] = common_stats->in.bad_tag_pkts;
	data[++i] = common_stats->in.no_sci_pkts;
	data[++i] = common_stats->in.unknown_sci_pkts;
	data[++i] = common_stats->in.ctrl_prt_pass_pkts;
	data[++i] = common_stats->in.unctrl_prt_pass_pkts;
	data[++i] = common_stats->in.ctrl_prt_fail_pkts;
	data[++i] = common_stats->in.unctrl_prt_fail_pkts;
	data[++i] = common_stats->in.too_long_pkts;
	data[++i] = common_stats->in.igpoc_ctl_pkts;
	data[++i] = common_stats->in.ecc_error_pkts;
	data[++i] = common_stats->in.unctrl_hit_drop_redir;
	data[++i] = common_stats->out.ctl_pkts;
	data[++i] = common_stats->out.unknown_sa_pkts;
	data[++i] = common_stats->out.untagged_pkts;
	data[++i] = common_stats->out.too_long;
	data[++i] = common_stats->out.ecc_error_pkts;
	data[++i] = common_stats->out.unctrl_hit_drop_redir;

	for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
		if (!(test_bit(sc_num, &cfg->txsc_idx_busy)))
			continue;

		aq_txsc = &cfg->aq_txsc[sc_num];
		txsc_stats = &aq_txsc->stats;

		data[++i] = txsc_stats->sc_protected_pkts;
		data[++i] = txsc_stats->sc_encrypted_pkts;
		data[++i] = txsc_stats->sc_protected_octets;
		data[++i] = txsc_stats->sc_encrypted_octets;

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
				continue;

			txsa_stats = &aq_txsc->tx_sa_stats[assoc_num];

			data[++i] = txsa_stats->sa_hit_drop_redirect;
			data[++i] = txsa_stats->sa_protected2_pkts;
			data[++i] = txsa_stats->sa_protected_pkts;
			data[++i] = txsa_stats->sa_encrypted_pkts;
		}
	}

	for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
		if (!(test_bit(sc_num, &cfg->rxsc_idx_busy)))
			continue;

		aq_rxsc = &cfg->aq_rxsc[sc_num];

		for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
			if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
				continue;

			rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num];

			data[++i] = rxsa_stats->untagged_hit_pkts;
			data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts;
			data[++i] = rxsa_stats->not_using_sa;
			data[++i] = rxsa_stats->unused_sa;
			data[++i] = rxsa_stats->not_valid_pkts;
			data[++i] = rxsa_stats->invalid_pkts;
			data[++i] = rxsa_stats->ok_pkts;
			data[++i] = rxsa_stats->late_pkts;
			data[++i] = rxsa_stats->delayed_pkts;
			data[++i] = rxsa_stats->unchecked_pkts;
			data[++i] = rxsa_stats->validated_octets;
			data[++i] = rxsa_stats->decrypted_octets;
		}
	}

	i++;

	data += i;

	return data;
}