/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>
typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))
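
/* Illustrative sketch (added for clarity, not part of the original
 * source): on the wire, a MACsec frame built from the definitions
 * above looks like
 *
 *   dst MAC (6) | src MAC (6) | EtherType 0x88E5 (2) |
 *   TCI/AN (1) | SL (1) | PN (4) | [SCI (8), only if MACSEC_TCI_SC] |
 *   secure data | ICV (16 by default)
 *
 * so the SecTAG itself is MACSEC_TAG_LEN (6) octets, plus
 * MACSEC_SCI_LEN (8) more when the SCI is carried explicitly.
 */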
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};
struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};
/**
 * struct macsec_rx_sa - receive secure association
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_tx_sc - transmit secure channel
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}
static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
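
/* Illustrative note (added, not in the original): for MAC address
 * 52:54:00:12:34:56 and port 1, the resulting SCI is the 8-byte string
 * 52:54:00:12:34:56:00:01, i.e. the station address followed by the
 * 16-bit port number in network byte order.
 */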
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
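
/* Worked example (added for clarity, not in the original): with the SCI
 * present, macsec_sectag_len() = 6 + 8 = 14; macsec_extra_len() adds the
 * 2-byte EtherType for 16, which is exactly MACSEC_NEEDED_HEADROOM below;
 * macsec_hdr_len() instead adds the full 14-byte Ethernet header, for 28.
 */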
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
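
/* Illustrative TCI/AN values produced above (added, not in the original):
 * an encrypting SecY with SCI present and encoding_sa 0 emits
 * tci_an = MACSEC_TCI_SC | MACSEC_TCI_CONFID = 0x2c; integrity-only with
 * the default 16-byte ICV emits just tci_an = MACSEC_TCI_SC = 0x20.
 */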
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
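
/* Example of the length rule f)-i) above (added, not in the original):
 * for a short frame with short_length = 30 and the SCI present, len must
 * equal extra_len + 30 = (16 + 16) + 30 = 62 octets (SecTAG plus
 * EtherType is 16, default ICV is 16); a frame with short_length = 0
 * only needs len >= extra_len + MIN_NON_SHORT_LEN.
 */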
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}
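
/* Sketch of the resulting nonce layout (added for clarity): the 12-byte
 * (GCM_AES_IV_LEN) IV is the 8-byte SCI followed by the 4-byte
 * big-endian packet number, as used by the GCM-AES cipher suites.
 */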
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
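
/* Usage note (added, not in the original): the returned value is the PN
 * placed in the SecTAG, and a return of 0 signals PN exhaustion to the
 * caller. E.g. when next_pn reaches 0xffffffff that PN is still sent,
 * but the SA goes !active and, with protect_frames set, the SecY drops
 * to !operational.
 */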
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}
static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
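
/* Layout sketch for the single allocation above (added for clarity):
 *
 *   [ aead_request + tfm reqsize | 12-byte IV | pad | num_frags sg entries ]
 *
 * One GFP_ATOMIC kmalloc() serves the request, the IV, and the
 * scatterlist, so one aead_request_free() releases everything.
 */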
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
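
/* AAD/plaintext split used above, summarized (added for clarity): in
 * encrypt mode the Ethernet header plus SecTAG (macsec_hdr_len()) is the
 * associated data and the payload is encrypted in place; in
 * integrity-only mode everything up to the ICV is associated data and
 * the crypt length is 0, so GCM only appends the ICV.
 */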
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}
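
/* Replay-window example (added, not in the original): with
 * replay_window = 32 and next_pn = 1000, lowest_pn = 968, so PNs
 * 968..999 are still accepted (counted as delayed or OK), while any PN
 * below 968 is counted as InPktsLate and dropped when replay_protect
 * is on.
 */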
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}
static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
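
/* Note on the decrypt lengths above (added for clarity): with TCI_E set
 * the cryptlen covers the encrypted payload plus the trailing ICV, which
 * is why it is skb->len minus only macsec_hdr_len(); in integrity-only
 * mode the cryptlen is just icv_len, the tag GCM must verify.
 */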
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	int ret;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
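
/* Usage sketch (added, not in the original): a 128-bit SAK with the
 * default ICV maps to macsec_alloc_tfm(sak, 16, 16), i.e. AES-128-GCM
 * with a full 16-byte authentication tag; a 32-byte key selects
 * AES-256-GCM through the same "gcm(aes)" template.
 */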
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}
static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;

	return rx_sa;
}
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
			     attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
			     attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}
		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}
	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */
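/* macsec_dump_txsc() below iterates over all netdevices in the caller's
 * netns and dumps every MACsec device.  Setting cb->seq to
 * macsec_generation (bumped under RTNL on every link add/delete) lets
 * genl_dump_check_consistent() mark dumps that raced with a topology
 * change as interrupted (NLM_F_DUMP_INTR).
 */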
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= macsec_genl_ops,
	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
};
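/* Transmit path: if the SecY does not protect frames, packets are counted
 * as OutPktsUntagged and passed straight to the real device; otherwise
 * they go through macsec_encrypt() (possibly asynchronously, signalled by
 * -EINPROGRESS) before being queued on the real device.  Illustrative
 * userspace setup with iproute2 (command syntax is an assumption, not
 * taken from this file):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit hex key>
 */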
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static struct lock_class_key macsec_netdev_addr_lock_key;
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
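/* MTU sanity: a MACsec frame adds the SecTAG and the ICV on top of the
 * payload.  With the SCI present, macsec_extra_len(true) is
 * MACSEC_TAG_LEN + MACSEC_SCI_LEN = 6 + 8 = 14 bytes; together with the
 * default 16-byte ICV, a 1500-byte lower MTU leaves at most
 * 1500 - 14 - 16 = 1470 bytes for the macsec device.
 */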
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
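/* The device stats are kept in per-CPU pcpu_sw_netstats and folded on
 * demand.  The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq()
 * pair is a seqcount-style retry loop: the snapshot into tmp is redone
 * until no writer touched the counters meanwhile, so the 64-bit counters
 * read consistently even on 32-bit machines.
 */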
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
	.ndo_get_lock_subclass	= macsec_get_nest_level,
};
static const struct device_type macsec_type = {
	.name = "macsec",
};
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	if (!data)
		return 0;

	/* these attributes are immutable after link creation */
	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	return macsec_changelink_common(dev, data);
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
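/* Note the unwind order in macsec_newlink() above: any failure past
 * register_netdevice() walks back through del_dev -> unlink -> unregister
 * so that a half-initialized macsec device is never left behind.
 */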
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}
static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames))
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
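/* Module bring-up registers three hooks in order: the netdevice notifier
 * (propagates MTU changes and unregistration of the underlying device),
 * the rtnl link ops ("ip link add ... type macsec"), and the generic
 * netlink family (used by "ip macsec" style tools).  macsec_init()
 * unwinds in reverse on failure; macsec_exit() tears down the same way.
 */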
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");