// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>
#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;
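/*
 * Editor's sketch (not from the original source): with the optional SCI
 * present, the SecTAG described by this struct occupies 14 octets after
 * the Ethernet header:
 *
 *	octet 0:	TCI/AN (V, ES, SC, SCB, E, C bits + 2-bit AN)
 *	octet 1:	short_length (6 bits) + 2 reserved bits
 *	octets 2-5:	packet_number, big endian
 *	octets 6-13:	SCI (only when MACSEC_TCI_SC is set)
 *
 * MACSEC_TAG_LEN (6) covers TCI/AN + SL + PN; MACSEC_SCI_LEN (8) is added
 * only when the SCI is carried explicitly.
 */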
#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16
#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
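/*
 * Editor's example: pn_same_half() only compares bit 31 of the two lower
 * PN halves, e.g.
 *
 *	pn_same_half(0x7fffffff, 0x00000001)	-> true  (bit 31 clear in both)
 *	pn_same_half(0x7fffffff, 0x80000000)	-> false (bit 31 differs)
 *
 * The XPN code below uses this to detect when the 32-bit PN carried in the
 * SecTAG has wrapped relative to the locally tracked next_pn.
 */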
struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
} __packed;
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};
/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}
static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}
static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}
static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
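/*
 * Editor's example: the SCI is simply the 6-byte MAC address followed by
 * the 2-byte port number, both in network order. For a hypothetical
 * address 52:54:00:12:34:56 and port MACSEC_PORT_ES (0x0001), the eight
 * SCI bytes on the wire are 52 54 00 12 34 56 00 01.
 */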
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
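/*
 * Worked example (editor's note), with the SCI present:
 *
 *	macsec_sectag_len(true)	= 6 + 8			= 14 (SecTAG itself)
 *	macsec_hdr_len(true)	= 14 + ETH_HLEN (14)	= 28 (AAD in encrypt mode)
 *	macsec_extra_len(true)	= 14 + 2 (EtherType)	= 16 (bytes pushed on TX)
 *
 * which is why MACSEC_NEEDED_HEADROOM below is macsec_extra_len(true).
 */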
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
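/*
 * Editor's example: for a SecY with encryption on, the SCI sent, and
 * encoding_sa == 1, the TCI/AN octet written above is
 *
 *	MACSEC_TCI_SC | MACSEC_TCI_E | MACSEC_TCI_C | 1
 *	= 0x20 | 0x08 | 0x04 | 0x01 = 0x2d
 */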
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}
/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}
static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}
/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}
/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}
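/*
 * Editor's note: per IEEE 802.1AEbw, the 96-bit XPN IV is the 32-bit SSCI
 * and the 64-bit PN, each XORed with the corresponding part of the key's
 * salt. E.g. ssci 0x00000001 with salt.ssci 0xdeadbeef gives an IV whose
 * first word is 0xdeadbeee, followed by cpu_to_be64(pn) ^ salt.pn.
 */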
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}
static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
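/*
 * Layout of the single allocation returned above (editor's sketch):
 *
 *	[ aead_request + reqsize(tfm) | 12-byte IV | pad | scatterlist[num_frags] ]
 *	^req			       ^*iv		  ^*sg (scatterlist-aligned)
 *
 * One GFP_ATOMIC allocation per packet instead of three separate ones on
 * the hot TX/RX paths.
 */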
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
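/*
 * Editor's example of the AEAD split used above, SCI present and a
 * 16-byte ICV:
 *
 *	encrypt mode:	AAD = first macsec_hdr_len(true) = 28 bytes,
 *			cryptlen = skb->len - 28 - 16; GCM writes the ICV
 *			into the tailroom reserved by skb_put().
 *	protect only:	AAD = everything except the ICV, cryptlen = 0, so
 *			GCM authenticates but does not encrypt.
 */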
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* Instead of "pn >=" - to support pn overflow in xpn */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}
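/*
 * Editor's example of the replay check above: with replay_window = 32 and
 * next_pn_halves.lower = 1000, lowest_pn = 968. An arriving pn of 950 is
 * dropped and counted as InPktsLate; pn 970 passes and, once validated,
 * advances next_pn_halves.lower to 971. Under XPN the pn_same_half()
 * guard keeps a wrapped pn from being misread as "late".
 */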
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}
static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}
static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}
static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam;
static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}
static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};
/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going to
	 * be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}
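/*
 * Editor's sketch of a typical caller: the same mdo_* hook is invoked
 * twice, first with ctx->prepare == true (allowed to fail), then with
 * ctx->prepare == false (must succeed), e.g.:
 *
 *	ctx.sa.assoc_num = assoc_num;
 *	ctx.sa.tx_sa = tx_sa;
 *	ctx.secy = secy;
 *	err = macsec_offload(ops->mdo_add_txsa, &ctx);
 */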
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	kfree(rx_sa);
	rtnl_unlock();
	return err;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	kfree(tx_sa);
	rtnl_unlock();
	return err;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}
static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->n_rx_sc > 0)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	enum macsec_offload offload, prev_offload;
	int (*func)(struct macsec_context *ctx);
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
		return -EINVAL;

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload)
		return 0;

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	rtnl_lock();

	prev_offload = macsec->offload;
	macsec->offload = offload;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec)) {
		ret = -EBUSY;
		goto rollback;
	}

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops) {
		ret = -EOPNOTSUPP;
		goto rollback;
	}

	if (prev_offload == MACSEC_OFFLOAD_OFF)
		func = ops->mdo_add_secy;
	else
		func = ops->mdo_del_secy;

	ctx.secy = &macsec->secy;
	ret = macsec_offload(func, &ctx);
	if (ret)
		goto rollback;

	/* Force features update, since they are different for SW MACSec and
	 * HW offloading cases.
	 */
	netdev_update_features(dev);

	rtnl_unlock();
	return 0;

rollback:
	macsec->offload = prev_offload;

	rtnl_unlock();
	return ret;
}
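
/* Userspace drives MACSEC_CMD_UPD_OFFLOAD to toggle the offload mode. With
 * iproute2 this is roughly (illustrative syntax, assuming a build with
 * MACsec offload support):
 *
 *   ip macsec offload macsec0 mac   # offload to the underlying MAC
 *   ip macsec offload macsec0 off   # fall back to the software datapath
 *
 * As enforced above, the mode can only change while the device is down and
 * before any SCs/SAs have been configured: rules migration is not supported.
 */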
static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}
static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
			sum->OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			sum->OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}
static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
			sum->InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
			sum->InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			sum->InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
			sum->InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.rx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(rx_sc->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->InOctetsValidated += tmp.InOctetsValidated;
		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum->InPktsUnchecked += tmp.InPktsUnchecked;
		sum->InPktsDelayed += tmp.InPktsDelayed;
		sum->InPktsOK += tmp.InPktsOK;
		sum->InPktsInvalid += tmp.InPktsInvalid;
		sum->InPktsLate += tmp.InPktsLate;
		sum->InPktsNotValid += tmp.InPktsNotValid;
		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
	}
}
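
/* The per-CPU readers above follow the usual u64_stats pattern: snapshot one
 * CPU's counters into a local struct under the seqcount and retry if a
 * writer raced, then accumulate the consistent snapshot:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		memcpy(&tmp, &stats->stats, sizeof(tmp));
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 *
 * This avoids torn 64-bit reads on 32-bit architectures without taking any
 * lock in the packet processing fast path.
 */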
static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum->InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum->InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum->InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum->InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum->InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum->InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum->InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum->InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum->InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum->InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static void get_tx_sc_stats(struct net_device *dev,
			    struct macsec_tx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.tx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->OutPktsProtected += tmp.OutPktsProtected;
		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum->OutOctetsProtected += tmp.OutOctetsProtected;
		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}
}
static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum->OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum->OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum->OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum->OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->OutPktsUntagged += tmp.OutPktsUntagged;
		sum->InPktsUntagged += tmp.InPktsUntagged;
		sum->OutPktsTooLong += tmp.OutPktsTooLong;
		sum->InPktsNoTag += tmp.InPktsNoTag;
		sum->InPktsBadTag += tmp.InPktsBadTag;
		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum->InPktsNoSCI += tmp.InPktsNoSCI;
		sum->InPktsOverrun += tmp.InPktsOverrun;
	}
}
static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum->OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum->InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum->OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum->InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum->InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum->InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum->InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum->InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
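
/* dump_secy() below emits one MACSEC_CMD_GET_TXSC message per SecY, roughly:
 *
 *	MACSEC_ATTR_IFINDEX
 *	MACSEC_ATTR_OFFLOAD { MACSEC_OFFLOAD_ATTR_TYPE }
 *	MACSEC_ATTR_SECY { ... }			(see nla_put_secy() above)
 *	MACSEC_ATTR_TXSC_STATS { ... }
 *	MACSEC_ATTR_SECY_STATS { ... }
 *	MACSEC_ATTR_TXSA_LIST { 1: { AN, PN, KEYID, ... }, 2: ... }
 *	MACSEC_ATTR_RXSC_LIST { 1: { SCI, stats, MACSEC_RXSC_ATTR_SA_LIST }, ... }
 *
 * Every nest is cancelled on failure so a partially built message is never
 * sent to userspace.
 */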
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;

	get_tx_sc_stats(dev, &tx_sc_stats);
	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	get_secy_stats(dev, &dev_stats);
	if (copy_secy_stats(skb, &dev_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};
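
/* These genl commands back the "ip macsec" subcommands in iproute2
 * (illustrative mapping, not part of this file):
 *
 *   ip macsec add macsec0 rx sci <sci>            -> MACSEC_CMD_ADD_RXSC
 *   ip macsec add macsec0 tx sa 0 pn 1 on key ... -> MACSEC_CMD_ADD_TXSA
 *   ip macsec show                                -> MACSEC_CMD_GET_TXSC (dump)
 *
 * All mutating commands require CAP_NET_ADMIN (GENL_ADMIN_PERM).
 */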
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
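
/* Transmit fast path summary: offloaded devices hand the skb straight to the
 * real device; with protect_frames disabled the frame is passed through
 * unmodified and only counted as OutPktsUntagged; otherwise macsec_encrypt()
 * runs the AEAD transform (possibly asynchronously, hence the -EINPROGRESS
 * special case above) before the frame is queued on the underlying device.
 */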
#define SW_MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

/* If h/w offloading is enabled, use real device features save for
 * VLAN_FEATURES - they require additional ops
 * HW_MACSEC - no reason to report it
 */
#define REAL_DEV_FEATURES(dev) \
	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	if (macsec_is_offloaded(macsec)) {
		dev->features = REAL_DEV_FEATURES(real_dev);
	} else {
		dev->features = real_dev->features & SW_MACSEC_FEATURES;
		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
	}

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	if (macsec_is_offloaded(macsec))
		return REAL_DEV_FEATURES(real_dev);

	features &= (real_dev->features & SW_MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
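
/* Worked example (assuming macsec_extra_len(true) covers the 2-byte
 * ethertype plus the 14-byte SecTAG with SCI, i.e. MACSEC_TAG_LEN +
 * MACSEC_SCI_LEN): with the default 16-byte ICV the per-frame overhead is
 * 16 + 16 = 32 bytes, so a 1500-byte lower MTU allows at most a 1468-byte
 * MTU on the macsec device.
 */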
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
};
static const struct device_type macsec_type = {
	.name = "macsec",
};
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
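
/* Example of attributes handled above, via iproute2 (illustrative):
 *
 *   ip link set macsec0 type macsec encrypt off
 *   ip link set macsec0 type macsec replay on window 128
 *
 * Cipher suite, ICV length, SCI and port are rejected by macsec_changelink()
 * below because they cannot be changed on an existing SecY; the cipher-suite
 * branch above is only reachable at link creation time.
 */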
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
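
/* New SecYs start with frame protection on, validation at the compiled-in
 * default, replay protection off, and the default (128-bit) key length;
 * userspace can override these at "ip link add" time via the IFLA_MACSEC_*
 * attributes handled in macsec_changelink_common() above.
 */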
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
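
/* Typical creation sequence from userspace (illustrative iproute2 usage):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <hex key>
 *   ip link set macsec0 up
 *
 * macsec_newlink() runs for the first command; the SAs are added afterwards
 * via the genl ops above.
 */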
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
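
/* The ES/SCB/INC_SCI check above mirrors the IEEE 802.1AE TCI encoding: the
 * SC bit (explicit SCI in the SecTAG) cannot be combined with the ES or SCB
 * shorthand bits, and ES and SCB are themselves mutually exclusive.
 */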
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}
static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");