/* drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>
typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;
#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
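/* Illustrative sketch (added, not part of the original driver): how the
 * TCI/AN octet defined above decomposes. The low two bits carry the
 * association number; the remaining six bits are the TCI flags. With GCM,
 * E and C are either both set (encrypt) or both clear (integrity only).
 */
static inline u8 example_sectag_an(u8 tci_an)
{
	return tci_an & MACSEC_AN_MASK;
}

static inline bool example_sectag_is_encrypted(u8 tci_an)
{
	return (tci_an & MACSEC_TCI_CONFID) == MACSEC_TCI_CONFID;
}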
/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};
struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};
struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};
struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};
struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};
struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};
/**
 * struct macsec_rx_sa - receive secure association
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};
struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};
/**
 * struct macsec_tx_sa - transmit secure association
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};
struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_tx_sc - transmit secure channel
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};
struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_dev - private data
 * @secy: SecY core
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
};
/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}
static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}
static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}
static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}
static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}
static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
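/* Worked example (added, not in the original): for end-station MAC
 * 52:54:00:12:34:56 and the default port MACSEC_PORT_ES (0x0001), the
 * resulting 8-byte SCI is laid out as 52 54 00 12 34 56 00 01 -- the MAC
 * address followed by the port in network byte order.
 */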
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}
static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}
static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (tx_sc->send_sci ||
	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}
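/* Layout note (added, not in the original): the IV built here matches
 * GCM_AES_IV_LEN (12 bytes): the 8-byte SCI followed by the 4-byte packet
 * number in network byte order, as MACsec's GCM-AES cipher suite requires.
 */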
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
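/* Example (added, not in the original): the PN is handed out before the
 * wrap check, so 0xffffffff is the last usable PN; when next_pn then wraps
 * to 0 the SA is deactivated and, with protect_frames set, the SecY drops
 * to !MAC_Operational until a fresh SA is installed. Callers treat a
 * returned PN of 0 as -ENOLINK.
 */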
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}
static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
}
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);

	req = tmp;
	aead_request_set_tfm(req, tfm);

	return req;
}
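/* Layout sketch (added, not in the original) of the single allocation
 * returned by macsec_alloc_req():
 *
 *   [ aead_request + tfm context | IV (GCM_AES_IV_LEN bytes) | padding to
 *     scatterlist alignment | scatterlist[MAX_SKB_FRAGS + 1] ]
 *
 * One kmalloc() per packet keeps the atomic-context allocation cheap, and
 * since req points at the start of the buffer, freeing the request
 * releases the IV and scatterlist with it.
 */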
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
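/* Worked example (added, not in the original): for a 1000-byte frame
 * (including the 14-byte Ethernet header), SCI present and a 16-byte ICV,
 * skb->len is 1032 after the SecTAG push and ICV tailroom. In encrypt mode
 * the 28-byte Ethernet+SecTAG header is the AAD and the remaining 988
 * bytes (original EtherType + payload) are encrypted; in integrity-only
 * mode the crypt length is 0 and all 1016 header+data bytes are
 * authenticated.
 */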
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}
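/* Worked example (added, not in the original): with replay_window = 32
 * and next_pn = 100, lowest_pn = 68. An arriving PN of 60 counts as
 * InPktsLate (replay protection on) or InPktsDelayed (off), while a valid
 * PN of 150 advances next_pn to 151.
 */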
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}
static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}

	aead_request_free(req);

	return skb;
}
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	int ret;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (!tfm || IS_ERR(tfm))
		return NULL;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}
static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		if (sa) {
			RCU_INIT_POINTER(rx_sc->sa[i], NULL);
			clear_rx_sa(sa);
		}
	}

	macsec_rxsc_put(rx_sc);
}
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}
static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam = {
	.id		= GENL_ID_GENERATE,
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
};
static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}
static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}
static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;

	return rx_sa;
}
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}
static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
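
/* Incremented (under RTNL) whenever a macsec device is added or removed;
 * dump_secy() feeds it to genl_dump_check_consistent() so that userspace
 * can detect a dump that raced with configuration changes.
 */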
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
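
/* Transmit path: frames are passed through unmodified when protect_frames
 * is off, dropped while the SecY is not operational, and otherwise handed
 * to macsec_encrypt(); -EINPROGRESS means the crypto callback will finish
 * the transmit asynchronously.
 */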
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
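
/* Lower-device features the virtual device may inherit. Checksum and
 * segmentation offloads are not inherited; the device advertises software
 * GSO (NETIF_F_GSO_SOFTWARE) instead, since frames are transformed before
 * they reach the real device.
 */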
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};
static const struct device_type macsec_type = {
	.name = "macsec",
};
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;

	eth_zero_addr(dev->broadcast);
}
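
/* Apply the subset of IFLA_MACSEC_* attributes that may change after the
 * device exists. Changing the encoding SA also recomputes whether the
 * SecY is operational, since transmission requires an active SA at that
 * index.
 */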
static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
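
/* The 64-bit SCI is the device's 6-byte MAC address followed by a 2-byte
 * port number, per IEEE 802.1AE; MACSEC_PORT_ES (port 1) is used when the
 * user did not specify an SCI or port.
 */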
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unregister;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unregister;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unregister:
	unregister_netdevice(dev);
	return err;
}
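
/* Validate the IFLA_MACSEC_* attributes at link creation: only the default
 * cipher suite (GCM-AES-128) is accepted, a non-default ICV length is
 * probed by allocating a throwaway transform, ES/SCB/INC_SCI are mutually
 * constrained by the TCI encoding, and replay protection requires an
 * explicit window.
 */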
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}
static size_t macsec_get_size(const struct net_device *dev)
{
	return	nla_total_size_64bit(8) + /* SCI */
		nla_total_size(1) + /* ICV_LEN */
		nla_total_size_64bit(8) + /* CIPHER_SUITE */
		nla_total_size(4) + /* WINDOW */
		nla_total_size(1) + /* ENCODING_SA */
		nla_total_size(1) + /* ENCRYPT */
		nla_total_size(1) + /* PROTECT */
		nla_total_size(1) + /* INC_SCI */
		nla_total_size(1) + /* ES */
		nla_total_size(1) + /* SCB */
		nla_total_size(1) + /* REPLAY_PROTECT */
		nla_total_size(1) + /* VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
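
/* Registered under rtnl kind "macsec"; with iproute2 a device is created
 * on top of a real interface with, for example (illustrative invocation):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 */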
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
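
/* React to events on the underlying device: tear down every macsec device
 * stacked on it when it unregisters, and clamp their MTU when its MTU
 * shrinks.
 */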
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");