// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Fraunhofer ITWM
 *
 * Written by:
 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
 */

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/ieee802154.h>
#include <linux/rculist.h>

#include <crypto/aead.h>
#include <crypto/skcipher.h>

#include "ieee802154_i.h"
#include "llsec.h"

static void llsec_key_put(struct mac802154_llsec_key *key);
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
			       const struct ieee802154_llsec_key_id *b);

static void llsec_dev_free(struct mac802154_llsec_device *dev);

void mac802154_llsec_init(struct mac802154_llsec *sec)
{
	memset(sec, 0, sizeof(*sec));

	memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);

	INIT_LIST_HEAD(&sec->table.security_levels);
	INIT_LIST_HEAD(&sec->table.devices);
	INIT_LIST_HEAD(&sec->table.keys);
	hash_init(sec->devices_short);
	hash_init(sec->devices_hw);
	rwlock_init(&sec->lock);
}

void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
	struct ieee802154_llsec_seclevel *sl, *sn;
	struct ieee802154_llsec_device *dev, *dn;
	struct ieee802154_llsec_key_entry *key, *kn;

	list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
		struct mac802154_llsec_seclevel *msl;

		msl = container_of(sl, struct mac802154_llsec_seclevel, level);
		list_del(&sl->list);
		kfree_sensitive(msl);
	}

	list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
		struct mac802154_llsec_device *mdev;

		mdev = container_of(dev, struct mac802154_llsec_device, dev);
		list_del(&dev->list);
		llsec_dev_free(mdev);
	}

	list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;

		mkey = container_of(key->key, struct mac802154_llsec_key, key);
		list_del(&key->list);
		llsec_key_put(mkey);
		kfree_sensitive(key);
	}
}

int mac802154_llsec_get_params(struct mac802154_llsec *sec,
			       struct ieee802154_llsec_params *params)
{
	read_lock_bh(&sec->lock);
	*params = sec->params;
	read_unlock_bh(&sec->lock);

	return 0;
}

int mac802154_llsec_set_params(struct mac802154_llsec *sec,
			       const struct ieee802154_llsec_params *params,
			       int changed)
{
	write_lock_bh(&sec->lock);

	if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
		sec->params.enabled = params->enabled;
	if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
		sec->params.frame_counter = params->frame_counter;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
		sec->params.out_level = params->out_level;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
		sec->params.out_key = params->out_key;
	if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
		sec->params.default_key_source = params->default_key_source;
	if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
		sec->params.pan_id = params->pan_id;
	if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
		sec->params.hwaddr = params->hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
		sec->params.coord_hwaddr = params->coord_hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
		sec->params.coord_shortaddr = params->coord_shortaddr;

	write_unlock_bh(&sec->lock);

	return 0;
}
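
/* Each key is backed by three "ccm(aes)" AEAD transforms, one per allowed
 * authentication tag length (4, 8 or 16 bytes), plus a "ctr(aes)" sync
 * skcipher used for the encryption-only security level; all of them are
 * keyed with the same 128-bit AES key from the template.
 */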
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
	const int authsizes[3] = { 4, 8, 16 };
	struct mac802154_llsec_key *key;
	int i;

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key)
		return NULL;

	kref_init(&key->ref);
	key->key = *template;

	BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
		key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(key->tfm[i]))
			goto err_tfm;
		if (crypto_aead_setkey(key->tfm[i], template->key,
				       IEEE802154_LLSEC_KEY_SIZE))
			goto err_tfm;
		if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
			goto err_tfm;
	}

	key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(key->tfm0))
		goto err_tfm;

	if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
					IEEE802154_LLSEC_KEY_SIZE))
		goto err_tfm0;

	return key;

err_tfm0:
	crypto_free_sync_skcipher(key->tfm0);
err_tfm:
	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		if (key->tfm[i])
			crypto_free_aead(key->tfm[i]);

	kfree_sensitive(key);
	return NULL;
}

static void llsec_key_release(struct kref *ref)
{
	struct mac802154_llsec_key *key;
	int i;

	key = container_of(ref, struct mac802154_llsec_key, ref);

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		crypto_free_aead(key->tfm[i]);

	crypto_free_sync_skcipher(key->tfm0);
	kfree_sensitive(key);
}

static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
	kref_get(&key->ref);
	return key;
}

static void llsec_key_put(struct mac802154_llsec_key *key)
{
	kref_put(&key->ref, llsec_key_release);
}

static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
			       const struct ieee802154_llsec_key_id *b)
{
	if (a->mode != b->mode)
		return false;

	if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
		return ieee802154_addr_equal(&a->device_addr, &b->device_addr);

	if (a->id != b->id)
		return false;

	switch (a->mode) {
	case IEEE802154_SCF_KEY_INDEX:
		return true;
	case IEEE802154_SCF_KEY_SHORT_INDEX:
		return a->short_source == b->short_source;
	case IEEE802154_SCF_KEY_HW_INDEX:
		return a->extended_source == b->extended_source;
	}

	return false;
}
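
/* Several key ids may refer to the same AES key material; such entries share
 * one refcounted mac802154_llsec_key instance instead of allocating a new
 * set of transforms per id.
 */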
int mac802154_llsec_key_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *id,
			    const struct ieee802154_llsec_key *key)
{
	struct mac802154_llsec_key *mkey = NULL;
	struct ieee802154_llsec_key_entry *pos, *new;

	if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
	    key->cmd_frame_ids)
		return -EINVAL;

	list_for_each_entry(pos, &sec->table.keys, list) {
		if (llsec_key_id_equal(&pos->id, id))
			return -EEXIST;

		if (memcmp(pos->key->key, key->key,
			   IEEE802154_LLSEC_KEY_SIZE))
			continue;

		mkey = container_of(pos->key, struct mac802154_llsec_key, key);

		/* Don't allow multiple instances of the same AES key to have
		 * different allowed frame types/command frame ids, as this is
		 * not possible in the 802.15.4 PIB.
		 */
		if (pos->key->frame_types != key->frame_types ||
		    pos->key->cmd_frame_ids != key->cmd_frame_ids)
			return -EEXIST;

		break;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (!mkey)
		mkey = llsec_key_alloc(key);
	else
		mkey = llsec_key_get(mkey);

	if (!mkey)
		goto fail;

	new->id = *id;
	new->key = &mkey->key;

	list_add_rcu(&new->list, &sec->table.keys);

	return 0;

fail:
	kfree_sensitive(new);
	return -ENOMEM;
}

int mac802154_llsec_key_del(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_key_entry *pos;

	list_for_each_entry(pos, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;

		mkey = container_of(pos->key, struct mac802154_llsec_key, key);

		if (llsec_key_id_equal(&pos->id, key)) {
			list_del_rcu(&pos->list);
			llsec_key_put(mkey);
			return 0;
		}
	}

	return -ENOENT;
}
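
/* Devices are tracked in two RCU hash tables: one keyed by (short address,
 * PAN id) for devices that use a short address, and one keyed by the
 * extended (hardware) address.
 */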
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
	return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
		short_addr != cpu_to_le16(0xffff);
}

static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
	return ((__force u16)short_addr) << 16 | (__force u16)pan_id;
}

static u64 llsec_dev_hash_long(__le64 hwaddr)
{
	return (__force u64)hwaddr;
}

static struct mac802154_llsec_device*
llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
		     __le16 pan_id)
{
	struct mac802154_llsec_device *dev;
	u32 key = llsec_dev_hash_short(short_addr, pan_id);

	hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
		if (dev->dev.short_addr == short_addr &&
		    dev->dev.pan_id == pan_id)
			return dev;
	}

	return NULL;
}

static struct mac802154_llsec_device*
llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
{
	struct mac802154_llsec_device *dev;
	u64 key = llsec_dev_hash_long(hwaddr);

	hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
		if (dev->dev.hwaddr == hwaddr)
			return dev;
	}

	return NULL;
}

static void llsec_dev_free(struct mac802154_llsec_device *dev)
{
	struct ieee802154_llsec_device_key *pos, *pn;
	struct mac802154_llsec_device_key *devkey;

	list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
		devkey = container_of(pos, struct mac802154_llsec_device_key,
				      devkey);

		list_del(&pos->list);
		kfree_sensitive(devkey);
	}

	kfree_sensitive(dev);
}

int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_device *dev)
{
	struct mac802154_llsec_device *entry;
	u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
	u64 hwkey = llsec_dev_hash_long(dev->hwaddr);

	BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);

	if ((llsec_dev_use_shortaddr(dev->short_addr) &&
	     llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
	     llsec_dev_find_long(sec, dev->hwaddr))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->dev = *dev;
	spin_lock_init(&entry->lock);
	INIT_LIST_HEAD(&entry->dev.keys);

	if (llsec_dev_use_shortaddr(dev->short_addr))
		hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
	else
		INIT_HLIST_NODE(&entry->bucket_s);

	hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
	list_add_tail_rcu(&entry->dev.list, &sec->table.devices);

	return 0;
}

static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
	llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
}

int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
{
	struct mac802154_llsec_device *pos;

	pos = llsec_dev_find_long(sec, device_addr);
	if (!pos)
		return -ENOENT;

	hash_del_rcu(&pos->bucket_s);
	hash_del_rcu(&pos->bucket_hw);
	list_del_rcu(&pos->dev.list);
	call_rcu(&pos->rcu, llsec_dev_free_rcu);

	return 0;
}

static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
		  const struct ieee802154_llsec_key_id *key)
{
	struct ieee802154_llsec_device_key *devkey;

	list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
		if (!llsec_key_id_equal(key, &devkey->key_id))
			continue;

		return container_of(devkey, struct mac802154_llsec_device_key,
				    devkey);
	}

	return NULL;
}

int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	if (llsec_devkey_find(dev, &key->key_id))
		return -EEXIST;

	devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
	if (!devkey)
		return -ENOMEM;

	devkey->devkey = *key;
	list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
	return 0;
}

int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	devkey = llsec_devkey_find(dev, &key->key_id);
	if (!devkey)
		return -ENOENT;

	list_del_rcu(&devkey->devkey.list);
	kfree_rcu(devkey, rcu);
	return 0;
}

static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
		    const struct ieee802154_llsec_seclevel *sl)
{
	struct ieee802154_llsec_seclevel *pos;

	list_for_each_entry(pos, &sec->table.security_levels, list) {
		if (pos->frame_type != sl->frame_type ||
		    (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
		     pos->cmd_frame_id != sl->cmd_frame_id) ||
		    pos->device_override != sl->device_override ||
		    pos->sec_levels != sl->sec_levels)
			continue;

		return container_of(pos, struct mac802154_llsec_seclevel,
				    level);
	}

	return NULL;
}

int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *entry;

	if (llsec_find_seclevel(sec, sl))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->level = *sl;

	list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);

	return 0;
}

int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *pos;

	pos = llsec_find_seclevel(sec, sl);
	if (!pos)
		return -ENOENT;

	list_del_rcu(&pos->level.list);
	kfree_rcu(pos, rcu);

	return 0;
}
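
/* Reconstruct a peer address for frames that carry none: use the coordinator
 * short address from the PIB, fall back to the coordinator extended address
 * if the short address is undefined, and give up if it is set to the
 * broadcast address.
 */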
static int llsec_recover_addr(struct mac802154_llsec *sec,
			      struct ieee802154_addr *addr)
{
	__le16 caddr = sec->params.coord_shortaddr;

	addr->pan_id = sec->params.pan_id;

	if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
		return -EINVAL;
	} else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
		addr->extended_addr = sec->params.coord_hwaddr;
		addr->mode = IEEE802154_ADDR_LONG;
	} else {
		addr->short_addr = sec->params.coord_shortaddr;
		addr->mode = IEEE802154_ADDR_SHORT;
	}

	return 0;
}
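
/* Select the key matching the frame's auxiliary security header: in implicit
 * key id mode the key is chosen by the peer device address (recovering the
 * coordinator address for beacons and addressless frames), otherwise by key
 * id mode, key index and key source.
 */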
static struct mac802154_llsec_key*
llsec_lookup_key(struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 const struct ieee802154_addr *addr,
		 struct ieee802154_llsec_key_id *key_id)
{
	struct ieee802154_addr devaddr = *addr;
	u8 key_id_mode = hdr->sec.key_id_mode;
	struct ieee802154_llsec_key_entry *key_entry;
	struct mac802154_llsec_key *key;

	if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
	    devaddr.mode == IEEE802154_ADDR_NONE) {
		if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
			devaddr.extended_addr = sec->params.coord_hwaddr;
			devaddr.mode = IEEE802154_ADDR_LONG;
		} else if (llsec_recover_addr(sec, &devaddr) < 0) {
			return NULL;
		}
	}

	list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
		const struct ieee802154_llsec_key_id *id = &key_entry->id;

		if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
			continue;

		if (id->mode != key_id_mode)
			continue;

		if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
			if (ieee802154_addr_equal(&devaddr, &id->device_addr))
				goto found;
		} else {
			if (id->id != hdr->sec.key_id)
				continue;

			if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
			    (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
			     id->short_source == hdr->sec.short_src) ||
			    (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
			     id->extended_source == hdr->sec.extended_src))
				goto found;
		}
	}

	return NULL;

found:
	key = container_of(key_entry->key, struct mac802154_llsec_key, key);
	if (key_id)
		*key_id = key_entry->id;
	return llsec_key_get(key);
}
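
/* Build the 16-byte initial counter block for CCM*: a flags octet with
 * L' = 1 (two-octet length field), the 13-octet 802.15.4 nonce (source
 * extended address, frame counter and security level, big endian) and a
 * two-octet block counter.
 */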
static void llsec_geniv(u8 iv[16], __le64 addr,
			const struct ieee802154_sechdr *sec)
{
	__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
	__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);

	iv[0] = 1; /* L' = L - 1 = 1 */
	memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
	memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
	iv[13] = sec->level;
	iv[14] = 0;
	iv[15] = 1;
}
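
/* Security level ENC (encryption without authentication) is handled as plain
 * AES-CTR over the payload; the block counter starts at 1, which is intended
 * to match the counter blocks CCM* uses for payload encryption.
 */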
static int
llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
			const struct ieee802154_hdr *hdr,
			struct mac802154_llsec_key *key)
{
	u8 iv[16];
	struct scatterlist src;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
	int err, datalen;
	unsigned char *data;

	llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
	/* Compute data payload offset and data length */
	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;
	sg_init_one(&src, data, datalen);

	skcipher_request_set_sync_tfm(req, key->tfm0);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}
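
/* Pick the AEAD transform whose authsize matches the MIC length implied by
 * the security level; llsec_key_alloc() covers all valid lengths, so a miss
 * here is a bug.
 */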
static struct crypto_aead*
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		if (crypto_aead_authsize(key->tfm[i]) == authlen)
			return key->tfm[i];

	BUG();
}
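
/* Authenticated levels use CCM*: the MAC header is associated data and the
 * MIC is appended to the skb. For levels without the encryption bit set, the
 * payload is folded into the associated data so it is authenticated but sent
 * in the clear.
 */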
static int
llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, assoclen, datalen, rc;
	struct scatterlist sg;
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	skb_put(skb, authlen);

	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen + authlen);

	if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
		assoclen += datalen;
		datalen = 0;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
	aead_request_set_ad(req, assoclen);

	rc = crypto_aead_encrypt(req);

	kfree_sensitive(req);

	return rc;
}

static int llsec_do_encrypt(struct sk_buff *skb,
			    const struct mac802154_llsec *sec,
			    const struct ieee802154_hdr *hdr,
			    struct mac802154_llsec_key *key)
{
	if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
		return llsec_do_encrypt_unauth(skb, sec, hdr, key);
	else
		return llsec_do_encrypt_auth(skb, sec, hdr, key);
}
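
/* Outgoing path: pull the MAC header, look up the key for the destination,
 * consume one value of the outgoing frame counter under the write lock, then
 * push the header back and encrypt/authenticate in place.
 */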
int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	int rc, authlen, hlen;
	struct mac802154_llsec_key *key;
	u32 frame_ctr;

	hlen = ieee802154_hdr_pull(skb, &hdr);

	if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
		return -EINVAL;

	if (!hdr.fc.security_enabled ||
	    (hdr.sec.level == IEEE802154_SCF_SECLEVEL_NONE)) {
		skb_push(skb, hlen);
		return 0;
	}

	authlen = ieee802154_sechdr_authtag_len(&hdr.sec);

	if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
		return -EMSGSIZE;

	rcu_read_lock();

	read_lock_bh(&sec->lock);

	if (!sec->params.enabled) {
		rc = -EINVAL;
		goto fail_read;
	}

	key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
	if (!key) {
		rc = -ENOKEY;
		goto fail_read;
	}

	read_unlock_bh(&sec->lock);

	write_lock_bh(&sec->lock);

	frame_ctr = be32_to_cpu(sec->params.frame_counter);
	hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
	if (frame_ctr == 0xFFFFFFFF) {
		write_unlock_bh(&sec->lock);
		llsec_key_put(key);
		rc = -EOVERFLOW;
		goto fail;
	}

	sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);

	write_unlock_bh(&sec->lock);

	rcu_read_unlock();

	skb->mac_len = ieee802154_hdr_push(skb, &hdr);
	skb_reset_mac_header(skb);

	rc = llsec_do_encrypt(skb, sec, &hdr, key);
	llsec_key_put(key);

	return rc;

fail_read:
	read_unlock_bh(&sec->lock);
fail:
	rcu_read_unlock();
	return rc;
}

static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
		 const struct ieee802154_addr *addr)
{
	struct ieee802154_addr devaddr = *addr;
	struct mac802154_llsec_device *dev = NULL;

	if (devaddr.mode == IEEE802154_ADDR_NONE &&
	    llsec_recover_addr(sec, &devaddr) < 0)
		return NULL;

	if (devaddr.mode == IEEE802154_ADDR_SHORT) {
		u32 key = llsec_dev_hash_short(devaddr.short_addr,
					       devaddr.pan_id);

		hash_for_each_possible_rcu(sec->devices_short, dev,
					   bucket_s, key) {
			if (dev->dev.pan_id == devaddr.pan_id &&
			    dev->dev.short_addr == devaddr.short_addr)
				return dev;
		}
	} else {
		u64 key = llsec_dev_hash_long(devaddr.extended_addr);

		hash_for_each_possible_rcu(sec->devices_hw, dev,
					   bucket_hw, key) {
			if (dev->dev.hwaddr == devaddr.extended_addr)
				return dev;
		}
	}

	return NULL;
}

static int
llsec_lookup_seclevel(const struct mac802154_llsec *sec,
		      u8 frame_type, u8 cmd_frame_id,
		      struct ieee802154_llsec_seclevel *rlevel)
{
	struct ieee802154_llsec_seclevel *level;

	list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
		if (level->frame_type == frame_type &&
		    (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
		     level->cmd_frame_id == cmd_frame_id)) {
			*rlevel = *level;
			return 0;
		}
	}

	return -EINVAL;
}

static int
llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
			const struct ieee802154_hdr *hdr,
			struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *data;
	int datalen;
	struct scatterlist src;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
	int err;

	llsec_geniv(iv, dev_addr, &hdr->sec);
	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	sg_init_one(&src, data, datalen);

	skcipher_request_set_sync_tfm(req, key->tfm0);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &src, &src, datalen, iv);

	err = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	return err;
}
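
/* CCM* verification/decryption: the MAC header (and, for unencrypted levels,
 * the payload) is associated data; the trailing MIC is checked and then
 * trimmed off the skb.
 */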
static int
llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
		      const struct ieee802154_hdr *hdr,
		      struct mac802154_llsec_key *key, __le64 dev_addr)
{
	u8 iv[16];
	unsigned char *data;
	int authlen, datalen, assoclen, rc;
	struct scatterlist sg;
	struct aead_request *req;

	authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
	llsec_geniv(iv, dev_addr, &hdr->sec);

	req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	assoclen = skb->mac_len;

	data = skb_mac_header(skb) + skb->mac_len;
	datalen = skb_tail_pointer(skb) - data;

	sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen);

	if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) {
		assoclen += datalen - authlen;
		datalen = authlen;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, datalen, iv);
	aead_request_set_ad(req, assoclen);

	rc = crypto_aead_decrypt(req);

	kfree_sensitive(req);
	skb_trim(skb, skb->len - authlen);

	return rc;
}

static int
llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
		 const struct ieee802154_hdr *hdr,
		 struct mac802154_llsec_key *key, __le64 dev_addr)
{
	if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
		return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
	else
		return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
}

static int
llsec_update_devkey_record(struct mac802154_llsec_device *dev,
			   const struct ieee802154_llsec_key_id *in_key)
{
	struct mac802154_llsec_device_key *devkey;

	devkey = llsec_devkey_find(dev, in_key);

	if (!devkey) {
		struct mac802154_llsec_device_key *next;

		next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
		if (!next)
			return -ENOMEM;

		next->devkey.key_id = *in_key;

		spin_lock_bh(&dev->lock);

		devkey = llsec_devkey_find(dev, in_key);
		if (!devkey)
			list_add_rcu(&next->devkey.list, &dev->dev.keys);
		else
			kfree_sensitive(next);

		spin_unlock_bh(&dev->lock);
	}

	return 0;
}
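
/* Anti-replay check: the received frame counter must not be lower than the
 * counter stored per device (or per device/key pair, depending on key_mode);
 * on success the stored counter is advanced past the received value.
 */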
static int
llsec_update_devkey_info(struct mac802154_llsec_device *dev,
			 const struct ieee802154_llsec_key_id *in_key,
			 u32 frame_counter)
{
	struct mac802154_llsec_device_key *devkey = NULL;

	if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
		devkey = llsec_devkey_find(dev, in_key);
		if (!devkey)
			return -ENOENT;
	}

	if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
		int rc = llsec_update_devkey_record(dev, in_key);

		if (rc < 0)
			return rc;
	}

	spin_lock_bh(&dev->lock);

	if ((!devkey && frame_counter < dev->dev.frame_counter) ||
	    (devkey && frame_counter < devkey->devkey.frame_counter)) {
		spin_unlock_bh(&dev->lock);
		return -EINVAL;
	}

	if (devkey)
		devkey->devkey.frame_counter = frame_counter + 1;
	else
		dev->dev.frame_counter = frame_counter + 1;

	spin_unlock_bh(&dev->lock);

	return 0;
}
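
/* Incoming path: peek at the MAC header, look up the key and the originating
 * device, check the security level policy for this frame type, enforce the
 * anti-replay counter and finally decrypt/verify in place.
 */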
int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;
	struct mac802154_llsec_key *key;
	struct ieee802154_llsec_key_id key_id;
	struct mac802154_llsec_device *dev;
	struct ieee802154_llsec_seclevel seclevel;
	int err;
	__le64 dev_addr;
	u32 frame_ctr;

	if (ieee802154_hdr_peek(skb, &hdr) < 0)
		return -EINVAL;
	if (!hdr.fc.security_enabled)
		return 0;
	if (hdr.fc.version == 0)
		return -EINVAL;

	read_lock_bh(&sec->lock);
	if (!sec->params.enabled) {
		read_unlock_bh(&sec->lock);
		return -EINVAL;
	}
	read_unlock_bh(&sec->lock);

	rcu_read_lock();

	key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
	if (!key) {
		err = -ENOKEY;
		goto fail;
	}

	dev = llsec_lookup_dev(sec, &hdr.source);
	if (!dev) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
		err = -EINVAL;
		goto fail_dev;
	}

	if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
	    (hdr.sec.level == 0 && seclevel.device_override &&
	     !dev->dev.seclevel_exempt)) {
		err = -EINVAL;
		goto fail_dev;
	}

	frame_ctr = le32_to_cpu(hdr.sec.frame_counter);

	if (frame_ctr == 0xffffffff) {
		err = -EOVERFLOW;
		goto fail_dev;
	}

	err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
	if (err)
		goto fail_dev;

	dev_addr = dev->dev.hwaddr;

	rcu_read_unlock();

	err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
	llsec_key_put(key);
	return err;

fail_dev:
	llsec_key_put(key);
fail:
	rcu_read_unlock();
	return err;
}