// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

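/* SA/SC get/put helpers: a "get" only succeeds if it can take a reference
 * (and, for SAs, only while the SA is active); every successful get must be
 * balanced by the matching put, which frees the object via RCU once the last
 * reference is dropped.
 */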
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

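/* The SCI is carried explicitly in the SecTAG either when explicitly
 * configured, or when more than one RX SC exists and neither the ES nor the
 * SCB bit can identify the transmitting station (IEEE 802.1AE-2006 10.5.3).
 */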
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
	}

	return macsec->real_dev->phydev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

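/* Allocate the next packet number under the SA lock; once the PN space is
 * exhausted the SA is taken out of service (and the SecY stops being
 * operational when protect_frames is set), matching the !oper transition
 * above.
 */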
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

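/* Software TX path: make room for the SecTAG and ICV, fill the SecTAG, and
 * run the GCM-AES AEAD over the frame. On asynchronous completion the skb is
 * handed to macsec_encrypt_done(), which transmits it on the real device.
 */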
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

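/* Post-decryption bookkeeping: re-check the replay window under the SA lock,
 * account the frame in the per-SC/per-SA counters required by the MIB, and
 * advance next_pn for frames that validated correctly.
 */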
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (!macsec_is_offloaded(macsec) &&
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}

		if (netif_running(macsec->secy.netdev) &&
		    macsec_is_offloaded(macsec)) {
			ret = RX_HANDLER_EXACT;
			goto out;
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

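/* rx_handler attached to the real device: frames without a SecTAG go through
 * handle_not_macsec(), tagged frames are matched to an RX SC/SA by SCI and
 * association number, decrypted/validated, and then delivered to the MACsec
 * netdevice's controlled port.
 */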
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					   struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;

	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going to be
	 * issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	kfree(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	kfree(tx_sa);
	rtnl_unlock();
	return err;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	u32 prev_pn = 0;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn;
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	u32 prev_pn = 0;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn;
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->n_rx_sc > 0)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

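/* Switching offloading on or off moves the whole SecY between the software
 * implementation and the offloading device: it is only allowed while the
 * device is down and before any RX SC or TX SA has been configured.
 */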
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	enum macsec_offload offload, prev_offload;
	int (*func)(struct macsec_context *ctx);
	struct nlattr **attrs = info->attrs;
	struct net_device *dev, *loop_dev;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	struct net *loop_net;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	macsec = macsec_priv(dev);

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload)
		return 0;

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	if (offload == MACSEC_OFFLOAD_OFF)
		goto skip_limitation;

	/* Check the physical interface isn't offloading another interface
	 * first.
	 */
	for_each_net(loop_net) {
		for_each_netdev(loop_net, loop_dev) {
			struct macsec_dev *priv;

			if (!netif_is_macsec(loop_dev))
				continue;

			priv = macsec_priv(loop_dev);

			if (priv->real_dev == macsec->real_dev &&
			    priv->offload != MACSEC_OFFLOAD_OFF)
				return -EBUSY;
		}
	}

skip_limitation:
	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	rtnl_lock();

	prev_offload = macsec->offload;
	macsec->offload = offload;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec)) {
		ret = -EBUSY;
		goto rollback;
	}

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops) {
		ret = -EOPNOTSUPP;
		goto rollback;
	}

	if (prev_offload == MACSEC_OFFLOAD_OFF)
		func = ops->mdo_add_secy;
	else
		func = ops->mdo_del_secy;

	ctx.secy = &macsec->secy;
	ret = macsec_offload(func, &ctx);
	if (ret)
		goto rollback;

	rtnl_unlock();
	return 0;

rollback:
	macsec->offload = prev_offload;

	rtnl_unlock();
	return ret;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged  += tmp.OutPktsUntagged;
		sum.InPktsUntagged   += tmp.InPktsUntagged;
		sum.OutPktsTooLong   += tmp.OutPktsTooLong;
		sum.InPktsNoTag      += tmp.InPktsNoTag;
		sum.InPktsBadTag     += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI      += tmp.InPktsNoSCI;
		sum.InPktsOverrun    += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
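/* Note on the copy_*_stats() helpers above: each folds per-CPU counters into
 * a single sum before emitting netlink attributes.  The 64-bit SC and SecY
 * counters are copied under the u64_stats seqcount so a 32-bit reader never
 * sees a torn value, following the usual pattern:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		memcpy(&tmp, &stats->stats, sizeof(tmp));
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 *
 * The per-SA counters are plain u32 per-CPU values, so copy_rx_sa_stats()
 * sums them directly without the retry loop.
 */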
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
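/* The attribute tree emitted for one SecY is, roughly:
 *
 *	MACSEC_ATTR_SECY
 *	  MACSEC_SECY_ATTR_SCI
 *	  MACSEC_SECY_ATTR_CIPHER_SUITE
 *	  MACSEC_SECY_ATTR_ICV_LEN
 *	  MACSEC_SECY_ATTR_OPER / _PROTECT / _REPLAY / _VALIDATE
 *	  MACSEC_SECY_ATTR_ENCRYPT / _INC_SCI / _ES / _SCB / _ENCODING_SA
 *	  MACSEC_SECY_ATTR_WINDOW   (only when replay protection is enabled)
 *
 * dump_secy() below wraps this nest together with the offload type, the
 * per-SC/SA lists and their statistics into one MACSEC_CMD_GET_TXSC message.
 */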
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
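/* Userspace consumes this through the MACSEC_CMD_GET_TXSC dump; with
 * iproute2, for example (a sketch of typical usage):
 *
 *	ip macsec show
 *	ip -s macsec show macsec0
 *
 * cb->args[0] remembers how many devices have already been dumped so a
 * multi-part dump can resume when the previous skb filled up, and cb->seq is
 * compared against macsec_generation so inconsistent dumps can be flagged to
 * the reader.
 */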
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};
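/* Every state-changing command above (add/del/upd of RX SCs and TX/RX SAs,
 * plus MACSEC_CMD_UPD_OFFLOAD) is marked GENL_ADMIN_PERM and therefore
 * requires CAP_NET_ADMIN; only the MACSEC_CMD_GET_TXSC dump is callable by
 * unprivileged users.  The GENL_DONT_VALIDATE_STRICT/_DUMP flags opt these
 * ops out of strict netlink attribute validation, presumably to keep the
 * behaviour seen by existing userspace unchanged.
 */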
static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.policy		= macsec_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= macsec_genl_ops,
	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
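/* Sketch of what the reservations above buy us, assuming the GCM-AES
 * defaults defined near the top of this file: the SecTAG is 6 bytes
 * (MACSEC_TAG_LEN), an explicit SCI adds 8 (MACSEC_SCI_LEN), and the ICV
 * appended to the payload is 16 bytes by default (DEFAULT_ICV_LEN).
 * Reserving that much headroom and tailroom relative to the underlying
 * device means the encryption path can usually expand the skb in place
 * instead of reallocating it for every protected frame.
 */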
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops)
			macsec_offload(ops->mdo_dev_stop, &ctx);
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
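/* Worked example, using the defaults from the top of this file: with a
 * 1500-byte MTU on the real device and the default 16-byte ICV, the MACsec
 * overhead is macsec_extra_len(true) + icv_len = (6 + 8) + 16 = 30 bytes,
 * so the largest value this check accepts is 1470.  macsec_newlink() uses
 * the same arithmetic to pick the initial dev->mtu.
 */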
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		return ret;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	/* MACsec offloading is off by default */
	macsec->offload = MACSEC_OFFLOAD_OFF;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
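/* Typical creation sequence from userspace, shown as an iproute2 sketch
 * (exact syntax depends on the iproute2 version; key material below is a
 * placeholder):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <hex key>
 *	ip macsec add macsec0 rx sci <peer sci>
 *	ip macsec add macsec0 rx sci <peer sci> sa 0 pn 1 on key 02 <hex key>
 *	ip link set macsec0 up
 *
 * The "type macsec" step ends up in macsec_newlink() above; the SA/SC
 * additions go through the genetlink ops in macsec_genl_ops.
 */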
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
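/* The combination check above mirrors how the SecTAG TCI bits encode the
 * SCI (see the MACSEC_TCI_* definitions near the top of this file): when an
 * explicit SCI is transmitted, the end-station and EPON single-copy-broadcast
 * shorthands must not be used, and ES and SCB are mutually exclusive as
 * well, since each of them tells the receiver to derive the SCI implicitly
 * in a different way.
 */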
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames))
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
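/* MODULE_ALIAS_RTNL_LINK("macsec") lets "ip link add ... type macsec"
 * autoload this module through request_module("rtnl-link-macsec"), and
 * MODULE_ALIAS_GENL_FAMILY("macsec") provides the same for generic netlink
 * requests addressed to the "macsec" family, so an explicit modprobe is
 * normally not needed.
 */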