net/mac80211/wpa.c (Linux 5.7.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2004, Instant802 Networks, Inc.
 * Copyright 2008, Jouni Malinen <j@w1.fi>
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/compiler.h>
#include <linux/ieee80211.h>
#include <linux/gfp.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>

#include "ieee80211_i.h"
#include "michael.h"
#include "tkip.h"
#include "aes_ccm.h"
#include "aes_cmac.h"
#include "aes_gmac.h"
#include "aes_gcm.h"
#include "wpa.h"

ieee80211_tx_result
ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
{
        u8 *data, *key, *mic;
        size_t data_len;
        unsigned int hdrlen;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = tx->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int tail;

        hdr = (struct ieee80211_hdr *)skb->data;
        if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
            skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
                return TX_CONTINUE;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (skb->len < hdrlen)
                return TX_DROP;

        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen;

        if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
                /* Need to use software crypto for the test */
                info->control.hw_key = NULL;
        }

        if (info->control.hw_key &&
            (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
             ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
            !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
                                     IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
                /* hwaccel - with no need for SW-generated MMIC or MIC space */
                return TX_CONTINUE;
        }

        tail = MICHAEL_MIC_LEN;
        if (!info->control.hw_key)
                tail += IEEE80211_TKIP_ICV_LEN;

        if (WARN(skb_tailroom(skb) < tail ||
                 skb_headroom(skb) < IEEE80211_TKIP_IV_LEN,
                 "mmic: not enough head/tail (%d/%d,%d/%d)\n",
                 skb_headroom(skb), IEEE80211_TKIP_IV_LEN,
                 skb_tailroom(skb), tail))
                return TX_DROP;

        mic = skb_put(skb, MICHAEL_MIC_LEN);

        if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
                /* Zeroed MIC can help with debug */
                memset(mic, 0, MICHAEL_MIC_LEN);
                return TX_CONTINUE;
        }

        key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
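        /*
         * Editor's note (based on how the Michael algorithm is used by
         * TKIP, see michael.c): the MIC is computed over a pseudo-header
         * built from DA, SA and the QoS priority plus the plaintext
         * payload, not over the full 802.11 header.
         */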
        michael_mic(key, hdr, data, data_len, mic);
        if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
                mic[0]++;

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
{
        u8 *data, *key = NULL;
        size_t data_len;
        unsigned int hdrlen;
        u8 mic[MICHAEL_MIC_LEN];
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        /*
         * It makes no sense to check for MIC errors on anything other
         * than data frames.
         */
        if (!ieee80211_is_data_present(hdr->frame_control))
                return RX_CONTINUE;

        /*
         * There is no way to verify the MIC if the hardware stripped it or
         * the IV with the key index. In this case we have to rely solely
         * on the driver to set RX_FLAG_MMIC_ERROR in the event of a
         * MIC failure report.
         */
        if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
                if (status->flag & RX_FLAG_MMIC_ERROR)
                        goto mic_fail_no_key;

                if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
                        goto update_iv;

                return RX_CONTINUE;
        }

        /*
         * Some hardware seems to generate Michael MIC failure reports even
         * though the frame was not encrypted with TKIP and therefore has no
         * MIC. Ignore the flag to avoid triggering countermeasures.
         */
        if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
            !(status->flag & RX_FLAG_DECRYPTED))
                return RX_CONTINUE;

        if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) {
                /*
                 * APs with pairwise keys should never receive Michael MIC
                 * errors for non-zero keyidx because these are reserved for
                 * group keys and only the AP is sending real multicast
                 * frames in the BSS.
                 */
                return RX_DROP_UNUSABLE;
        }

        if (status->flag & RX_FLAG_MMIC_ERROR)
                goto mic_fail;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (skb->len < hdrlen + MICHAEL_MIC_LEN)
                return RX_DROP_UNUSABLE;

        if (skb_linearize(rx->skb))
                return RX_DROP_UNUSABLE;
        hdr = (void *)skb->data;

        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
        key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
        michael_mic(key, hdr, data, data_len, mic);
        if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
                goto mic_fail;

        /* remove Michael MIC from payload */
        skb_trim(skb, skb->len - MICHAEL_MIC_LEN);

update_iv:
        /* update IV in key information to be able to detect replays */
        rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
        rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;

        return RX_CONTINUE;

mic_fail:
        rx->key->u.tkip.mic_failures++;

mic_fail_no_key:
        /*
         * In some cases the key can be unset - e.g. a multicast packet, in
         * a driver that supports HW encryption. Send up the key idx only if
         * the key is set.
         */
        cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2,
                                     is_multicast_ether_addr(hdr->addr1) ?
                                     NL80211_KEYTYPE_GROUP :
                                     NL80211_KEYTYPE_PAIRWISE,
                                     rx->key ? rx->key->conf.keyidx : -1,
                                     NULL, GFP_ATOMIC);
        return RX_DROP_UNUSABLE;
}

static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        unsigned int hdrlen;
        int len, tail;
        u64 pn;
        u8 *pos;

        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
                /* hwaccel - with no need for software-generated IV */
                return 0;
        }

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        len = skb->len - hdrlen;

        if (info->control.hw_key)
                tail = 0;
        else
                tail = IEEE80211_TKIP_ICV_LEN;

        if (WARN_ON(skb_tailroom(skb) < tail ||
                    skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
                return -1;

        pos = skb_push(skb, IEEE80211_TKIP_IV_LEN);
        memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen);
        pos += hdrlen;
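        /*
         * Illustrative layout (editor's sketch, not in the original source):
         * the skb_push()/memmove() pair above turns
         *
         *     [ 802.11 header | payload ]
         * into
         *     [ 802.11 header | 8-byte IV/ExtIV gap | payload ]
         *
         * with pos now pointing at the gap, which ieee80211_tkip_add_iv()
         * fills in below unless the hardware only wants the space reserved.
         */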

        /* the HW only needs room for the IV, but not the actual IV */
        if (info->control.hw_key &&
            (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
                return 0;

        /* Increase IV for the frame */
        pn = atomic64_inc_return(&key->conf.tx_pn);
        pos = ieee80211_tkip_add_iv(pos, &key->conf, pn);

        /* hwaccel - with software IV */
        if (info->control.hw_key)
                return 0;

        /* Add room for ICV */
        skb_put(skb, IEEE80211_TKIP_ICV_LEN);

        return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx,
                                           key, skb, pos, len);
}


ieee80211_tx_result
ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;

        ieee80211_tx_set_protected(tx);

        skb_queue_walk(&tx->skbs, skb) {
                if (tkip_encrypt_skb(tx, skb) < 0)
                        return TX_DROP;
        }

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
        int hdrlen, res, hwaccel = 0;
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

        hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (!ieee80211_is_data(hdr->frame_control))
                return RX_CONTINUE;

        if (!rx->sta || skb->len - hdrlen < 12)
                return RX_DROP_UNUSABLE;

        /* it may be possible to optimize this a bit more */
        if (skb_linearize(rx->skb))
                return RX_DROP_UNUSABLE;
        hdr = (void *)skb->data;

        /*
         * Let TKIP code verify IV, but skip decryption.
         * In the case where hardware checks the IV as well,
         * we don't even get here, see ieee80211_rx_h_decrypt()
         */
        if (status->flag & RX_FLAG_DECRYPTED)
                hwaccel = 1;

        res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx,
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
                                          hdr->addr1, hwaccel, rx->security_idx,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
        if (res != TKIP_DECRYPT_OK)
                return RX_DROP_UNUSABLE;

        /* Trim ICV */
        if (!(status->flag & RX_FLAG_ICV_STRIPPED))
                skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);

        /* Remove IV */
        memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
        skb_pull(skb, IEEE80211_TKIP_IV_LEN);

        return RX_CONTINUE;
}

static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
{
        __le16 mask_fc;
        int a4_included, mgmt;
        u8 qos_tid;
        u16 len_a;
        unsigned int hdrlen;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        /*
         * Mask FC: zero subtype b4 b5 b6 (if not mgmt)
         * Retry, PwrMgt, MoreData; set Protected
         */
        mgmt = ieee80211_is_mgmt(hdr->frame_control);
        mask_fc = hdr->frame_control;
        mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
                                IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
        if (!mgmt)
                mask_fc &= ~cpu_to_le16(0x0070);
        mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        len_a = hdrlen - 2;
        a4_included = ieee80211_has_a4(hdr->frame_control);

        if (ieee80211_is_data_qos(hdr->frame_control))
                qos_tid = ieee80211_get_tid(hdr);
        else
                qos_tid = 0;

        /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
         * mode authentication are not allowed to collide, yet both are derived
         * from this vector b_0. We only set L := 1 here to indicate that the
         * data size can be represented in (L+1) bytes. The CCM layer will take
         * care of storing the data length in the top (L+1) bytes and setting
         * and clearing the other bits as is required to derive the two IVs.
         */
        b_0[0] = 0x1;

        /* Nonce: Nonce Flags | A2 | PN
         * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
         */
        b_0[1] = qos_tid | (mgmt << 4);
        memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
        memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
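        /*
         * Illustrative nonce layout (editor's sketch, not in the original
         * source): for a QoS data frame with TID 5, transmitter address
         * 02:00:00:00:01:00 and PN 0x000000000001, the 16-byte b_0 block is
         *
         *   b_0[0]      = 0x01               CCM flags placeholder (L := 1)
         *   b_0[1]      = 0x05               nonce flags (TID 5, not mgmt)
         *   b_0[2..7]   = 02 00 00 00 01 00  A2 (transmitter address)
         *   b_0[8..13]  = 00 00 00 00 00 01  PN, most significant byte first
         *   b_0[14..15]                      filled by the CCM layer with l(m)
         */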

        /* AAD (extra authenticate-only data) / masked 802.11 header
         * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
        put_unaligned_be16(len_a, &aad[0]);
        put_unaligned(mask_fc, (__le16 *)&aad[2]);
        memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);

        /* Mask Seq#, leave Frag# */
        aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
        aad[23] = 0;

        if (a4_included) {
                memcpy(&aad[24], hdr->addr4, ETH_ALEN);
                aad[30] = qos_tid;
                aad[31] = 0;
        } else {
                memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
                aad[24] = qos_tid;
        }
}
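/*
 * Editor's summary of the AAD buffer (not in the original source):
 * aad[0..1] carry the AAD length (len_a = hdrlen - 2, big endian) for the
 * CCM glue code, and the AAD proper starts at aad[2]:
 *
 *   aad[2..3]    masked Frame Control
 *   aad[4..21]   A1 | A2 | A3
 *   aad[22..23]  Sequence Control with the sequence number masked out
 *   aad[24..]    A4 and/or QoS Control when present, zero padded
 *
 * e.g. a 3-address QoS data frame (hdrlen = 26) gives len_a = 24.
 */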

static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id)
{
        hdr[0] = pn[5];
        hdr[1] = pn[4];
        hdr[2] = 0;
        hdr[3] = 0x20 | (key_id << 6);
        hdr[4] = pn[3];
        hdr[5] = pn[2];
        hdr[6] = pn[1];
        hdr[7] = pn[0];
}


static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr)
{
        pn[0] = hdr[7];
        pn[1] = hdr[6];
        pn[2] = hdr[5];
        pn[3] = hdr[4];
        pn[4] = hdr[1];
        pn[5] = hdr[0];
}
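/*
 * Worked example (editor's illustration, not in the original source):
 * with pn[] holding the packet number most significant byte first,
 * pn = {00 00 00 00 01 02} (PN = 0x000000000102) and key_id = 1 give the
 * on-air CCMP header
 *
 *   hdr[0..1] = 02 01          PN0, PN1
 *   hdr[2]    = 00             reserved
 *   hdr[3]    = 60             ExtIV (0x20) | key_id << 6
 *   hdr[4..7] = 00 00 00 00    PN2..PN5
 *
 * and ccmp_hdr2pn() recovers the same pn[] from those eight bytes.
 */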

static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
                            unsigned int mic_len)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int hdrlen, len, tail;
        u8 *pos;
        u8 pn[6];
        u64 pn64;
        u8 aad[CCM_AAD_LEN];
        u8 b_0[AES_BLOCK_SIZE];

        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
            !((info->control.hw_key->flags &
               IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
              ieee80211_is_mgmt(hdr->frame_control))) {
                /*
                 * hwaccel has no need for preallocated room for CCMP
                 * header or MIC fields
                 */
                return 0;
        }

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        len = skb->len - hdrlen;

        if (info->control.hw_key)
                tail = 0;
        else
                tail = mic_len;

        if (WARN_ON(skb_tailroom(skb) < tail ||
                    skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
                return -1;

        pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN);
        memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen);

        /* the HW only needs room for the IV, but not the actual IV */
        if (info->control.hw_key &&
            (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
                return 0;

        hdr = (struct ieee80211_hdr *) pos;
        pos += hdrlen;

        pn64 = atomic64_inc_return(&key->conf.tx_pn);

        pn[5] = pn64;
        pn[4] = pn64 >> 8;
        pn[3] = pn64 >> 16;
        pn[2] = pn64 >> 24;
        pn[1] = pn64 >> 32;
        pn[0] = pn64 >> 40;
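        /*
         * Editor's note (not in the original source): pn[] now holds the
         * 48-bit packet number most significant byte first, e.g.
         * pn64 = 0x010203040506 gives pn[] = {01 02 03 04 05 06}.
         */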

        ccmp_pn2hdr(pos, pn, key->conf.keyidx);

        /* hwaccel - with software CCMP header */
        if (info->control.hw_key)
                return 0;

        pos += IEEE80211_CCMP_HDR_LEN;
        ccmp_special_blocks(skb, pn, b_0, aad);
        return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
                                         skb_put(skb, mic_len));
}


ieee80211_tx_result
ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
                              unsigned int mic_len)
{
        struct sk_buff *skb;

        ieee80211_tx_set_protected(tx);

        skb_queue_walk(&tx->skbs, skb) {
                if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
                        return TX_DROP;
        }

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                              unsigned int mic_len)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        int hdrlen;
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        u8 pn[IEEE80211_CCMP_PN_LEN];
        int data_len;
        int queue;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (!ieee80211_is_data(hdr->frame_control) &&
            !ieee80211_is_robust_mgmt_frame(skb))
                return RX_CONTINUE;

        if (status->flag & RX_FLAG_DECRYPTED) {
                if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
                        return RX_DROP_UNUSABLE;
                if (status->flag & RX_FLAG_MIC_STRIPPED)
                        mic_len = 0;
        } else {
                if (skb_linearize(rx->skb))
                        return RX_DROP_UNUSABLE;
        }

        data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;

        if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
                int res;

                ccmp_hdr2pn(pn, skb->data + hdrlen);

                queue = rx->security_idx;

                res = memcmp(pn, key->u.ccmp.rx_pn[queue],
                             IEEE80211_CCMP_PN_LEN);
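                /*
                 * Editor's note (not in the original source): pn[] and the
                 * stored rx_pn are both most significant byte first, so a
                 * byte-wise memcmp() orders them numerically; res < 0 means
                 * the frame's PN is older than the last accepted one.
                 */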
                if (res < 0 ||
                    (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
                        key->u.ccmp.replays++;
                        return RX_DROP_UNUSABLE;
                }

                if (!(status->flag & RX_FLAG_DECRYPTED)) {
                        u8 aad[2 * AES_BLOCK_SIZE];
                        u8 b_0[AES_BLOCK_SIZE];
                        /* hardware didn't decrypt/verify MIC */
                        ccmp_special_blocks(skb, pn, b_0, aad);

                        if (ieee80211_aes_ccm_decrypt(
                                    key->u.ccmp.tfm, b_0, aad,
                                    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
                                    data_len,
                                    skb->data + skb->len - mic_len))
                                return RX_DROP_UNUSABLE;
                }

                memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
        }

        /* Remove CCMP header and MIC */
        if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
        memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
        skb_pull(skb, IEEE80211_CCMP_HDR_LEN);

        return RX_CONTINUE;
}


static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
{
        __le16 mask_fc;
        u8 qos_tid;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        memcpy(j_0, hdr->addr2, ETH_ALEN);
        memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
        j_0[13] = 0;
        j_0[14] = 0;
        j_0[AES_BLOCK_SIZE - 1] = 0x01;
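        /*
         * Illustrative J_0 layout (editor's sketch, not in the original
         * source):
         *
         *   j_0[0..5]   A2 (transmitter address)
         *   j_0[6..11]  PN, most significant byte first
         *   j_0[15]     0x01, the initial counter value
         *
         * The first 12 bytes form the GCM nonce that makes each frame's
         * keystream unique.
         */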

        /* AAD (extra authenticate-only data) / masked 802.11 header
         * FC | A1 | A2 | A3 | SC | [A4] | [QC]
         */
        put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]);
        /* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
         * Retry, PwrMgt, MoreData; set Protected
         */
        mask_fc = hdr->frame_control;
        mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
                                IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
        if (!ieee80211_is_mgmt(hdr->frame_control))
                mask_fc &= ~cpu_to_le16(0x0070);
        mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

        put_unaligned(mask_fc, (__le16 *)&aad[2]);
        memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);

        /* Mask Seq#, leave Frag# */
        aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f;
        aad[23] = 0;

        if (ieee80211_is_data_qos(hdr->frame_control))
                qos_tid = ieee80211_get_tid(hdr);
        else
                qos_tid = 0;

        if (ieee80211_has_a4(hdr->frame_control)) {
                memcpy(&aad[24], hdr->addr4, ETH_ALEN);
                aad[30] = qos_tid;
                aad[31] = 0;
        } else {
                memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
                aad[24] = qos_tid;
        }
}


static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
{
        hdr[0] = pn[5];
        hdr[1] = pn[4];
        hdr[2] = 0;
        hdr[3] = 0x20 | (key_id << 6);
        hdr[4] = pn[3];
        hdr[5] = pn[2];
        hdr[6] = pn[1];
        hdr[7] = pn[0];
}


static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr)
{
        pn[0] = hdr[7];
        pn[1] = hdr[6];
        pn[2] = hdr[5];
        pn[3] = hdr[4];
        pn[4] = hdr[1];
        pn[5] = hdr[0];
}


static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int hdrlen, len, tail;
        u8 *pos;
        u8 pn[6];
        u64 pn64;
        u8 aad[GCM_AAD_LEN];
        u8 j_0[AES_BLOCK_SIZE];

        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
            !((info->control.hw_key->flags &
               IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
              ieee80211_is_mgmt(hdr->frame_control))) {
                /* hwaccel has no need for preallocated room for GCMP
                 * header or MIC fields
                 */
                return 0;
        }

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        len = skb->len - hdrlen;

        if (info->control.hw_key)
                tail = 0;
        else
                tail = IEEE80211_GCMP_MIC_LEN;

        if (WARN_ON(skb_tailroom(skb) < tail ||
                    skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
                return -1;

        pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
        memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen);
        skb_set_network_header(skb, skb_network_offset(skb) +
                                    IEEE80211_GCMP_HDR_LEN);

        /* the HW only needs room for the IV, but not the actual IV */
        if (info->control.hw_key &&
            (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
                return 0;

        hdr = (struct ieee80211_hdr *)pos;
        pos += hdrlen;

        pn64 = atomic64_inc_return(&key->conf.tx_pn);

        pn[5] = pn64;
        pn[4] = pn64 >> 8;
        pn[3] = pn64 >> 16;
        pn[2] = pn64 >> 24;
        pn[1] = pn64 >> 32;
        pn[0] = pn64 >> 40;

        gcmp_pn2hdr(pos, pn, key->conf.keyidx);

        /* hwaccel - with software GCMP header */
        if (info->control.hw_key)
                return 0;

        pos += IEEE80211_GCMP_HDR_LEN;
        gcmp_special_blocks(skb, pn, j_0, aad);
        return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
                                         skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}


ieee80211_tx_result
ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;

        ieee80211_tx_set_protected(tx);

        skb_queue_walk(&tx->skbs, skb) {
                if (gcmp_encrypt_skb(tx, skb) < 0)
                        return TX_DROP;
        }

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        int hdrlen;
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        u8 pn[IEEE80211_GCMP_PN_LEN];
        int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (!ieee80211_is_data(hdr->frame_control) &&
            !ieee80211_is_robust_mgmt_frame(skb))
                return RX_CONTINUE;

        if (status->flag & RX_FLAG_DECRYPTED) {
                if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
                        return RX_DROP_UNUSABLE;
                if (status->flag & RX_FLAG_MIC_STRIPPED)
                        mic_len = 0;
        } else {
                if (skb_linearize(rx->skb))
                        return RX_DROP_UNUSABLE;
        }

        data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;

        if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
                int res;

                gcmp_hdr2pn(pn, skb->data + hdrlen);

                queue = rx->security_idx;

                res = memcmp(pn, key->u.gcmp.rx_pn[queue],
                             IEEE80211_GCMP_PN_LEN);
                if (res < 0 ||
                    (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
                        key->u.gcmp.replays++;
                        return RX_DROP_UNUSABLE;
                }

                if (!(status->flag & RX_FLAG_DECRYPTED)) {
                        u8 aad[2 * AES_BLOCK_SIZE];
                        u8 j_0[AES_BLOCK_SIZE];
                        /* hardware didn't decrypt/verify MIC */
                        gcmp_special_blocks(skb, pn, j_0, aad);

                        if (ieee80211_aes_gcm_decrypt(
                                    key->u.gcmp.tfm, j_0, aad,
                                    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
                                    data_len,
                                    skb->data + skb->len -
                                    IEEE80211_GCMP_MIC_LEN))
                                return RX_DROP_UNUSABLE;
                }

                memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
        }

        /* Remove GCMP header and MIC */
        if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
        memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
        skb_pull(skb, IEEE80211_GCMP_HDR_LEN);

        return RX_CONTINUE;
}


static ieee80211_tx_result
ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
                            struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int hdrlen;
        u8 *pos, iv_len = key->conf.iv_len;

        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
                /* hwaccel has no need for preallocated head room */
                return TX_CONTINUE;
        }

        if (unlikely(skb_headroom(skb) < iv_len &&
                     pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC)))
                return TX_DROP;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);

        pos = skb_push(skb, iv_len);
        memmove(pos, pos + iv_len, hdrlen);

        return TX_CONTINUE;
}


static inline int ieee80211_crypto_cs_pn_compare(u8 *pn1, u8 *pn2, int len)
{
        int i;

        /* pn is little endian */
        for (i = len - 1; i >= 0; i--) {
                if (pn1[i] < pn2[i])
                        return -1;
                else if (pn1[i] > pn2[i])
                        return 1;
        }

        return 0;
}
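/*
 * Worked example (editor's illustration, not in the original source):
 * the PN here is little endian, so the loop compares the most significant
 * byte first.  With len = 2, pn1 = {0x05, 0x00} (PN 0x0005) and
 * pn2 = {0x01, 0x01} (PN 0x0101), the first iteration sees 0x00 < 0x01 and
 * returns -1: pn1 is numerically smaller even though its first byte is
 * larger.
 */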

static ieee80211_rx_result
ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx)
{
        struct ieee80211_key *key = rx->key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        const struct ieee80211_cipher_scheme *cs = NULL;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
        int data_len;
        u8 *rx_pn;
        u8 *skb_pn;
        u8 qos_tid;

        if (!rx->sta || !rx->sta->cipher_scheme ||
            !(status->flag & RX_FLAG_DECRYPTED))
                return RX_DROP_UNUSABLE;

        if (!ieee80211_is_data(hdr->frame_control))
                return RX_CONTINUE;

        cs = rx->sta->cipher_scheme;

        data_len = rx->skb->len - hdrlen - cs->hdr_len;

        if (data_len < 0)
                return RX_DROP_UNUSABLE;

        if (ieee80211_is_data_qos(hdr->frame_control))
                qos_tid = ieee80211_get_tid(hdr);
        else
                qos_tid = 0;

        if (skb_linearize(rx->skb))
                return RX_DROP_UNUSABLE;

        hdr = (struct ieee80211_hdr *)rx->skb->data;

        rx_pn = key->u.gen.rx_pn[qos_tid];
        skb_pn = rx->skb->data + hdrlen + cs->pn_off;

        if (ieee80211_crypto_cs_pn_compare(skb_pn, rx_pn, cs->pn_len) <= 0)
                return RX_DROP_UNUSABLE;

        memcpy(rx_pn, skb_pn, cs->pn_len);

        /* remove security header and MIC */
        if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len))
                return RX_DROP_UNUSABLE;

        memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen);
        skb_pull(rx->skb, cs->hdr_len);

        return RX_CONTINUE;
}


static void bip_aad(struct sk_buff *skb, u8 *aad)
{
        __le16 mask_fc;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        /* BIP AAD: FC(masked) || A1 || A2 || A3 */

        /* FC type/subtype */
        /* Mask FC Retry, PwrMgt, MoreData flags to zero */
        mask_fc = hdr->frame_control;
        mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
                                IEEE80211_FCTL_MOREDATA);
        put_unaligned(mask_fc, (__le16 *) &aad[0]);
        /* A1 || A2 || A3 */
        memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
}
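/*
 * Editor's summary (not in the original source): the resulting 20-byte
 * BIP AAD is
 *
 *   aad[0..1]   Frame Control with Retry/PwrMgt/MoreData cleared
 *   aad[2..19]  A1 | A2 | A3
 *
 * i.e. the parts of the management frame header that must not change in
 * transit.
 */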

static inline void bip_ipn_set64(u8 *d, u64 pn)
{
        *d++ = pn;
        *d++ = pn >> 8;
        *d++ = pn >> 16;
        *d++ = pn >> 24;
        *d++ = pn >> 32;
        *d = pn >> 40;
}

static inline void bip_ipn_swap(u8 *d, const u8 *s)
{
        *d++ = s[5];
        *d++ = s[4];
        *d++ = s[3];
        *d++ = s[2];
        *d++ = s[1];
        *d = s[0];
}
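/*
 * Editor's note (not in the original source): bip_ipn_set64() stores the
 * IPN little endian, low byte first, as it is carried in the MMIE, while
 * bip_ipn_swap() reverses the six bytes to obtain the most-significant-
 * byte-first form used for replay comparisons and the GMAC nonce.
 * Example: pn = 0x000000000102 -> MMIE bytes 02 01 00 00 00 00, swapped
 * form 00 00 00 00 01 02.
 */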

ieee80211_tx_result
ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_mmie *mmie;
        u8 aad[20];
        u64 pn64;

        if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
                return TX_DROP;

        skb = skb_peek(&tx->skbs);

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key &&
            !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE))
                return TX_CONTINUE;

        if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
                return TX_DROP;

        mmie = skb_put(skb, sizeof(*mmie));
        mmie->element_id = WLAN_EID_MMIE;
        mmie->length = sizeof(*mmie) - 2;
        mmie->key_id = cpu_to_le16(key->conf.keyidx);

        /* PN = PN + 1 */
        pn64 = atomic64_inc_return(&key->conf.tx_pn);

        bip_ipn_set64(mmie->sequence_number, pn64);
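        /*
         * Editor's sketch of the element just built (not in the original
         * source): Element ID 76 (MMIE), Length 16, a 2-byte Key ID, the
         * 6-byte IPN written above and an 8-byte MIC that is filled in
         * below, or left for the hardware - 18 octets in total for
         * BIP-CMAC-128.
         */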

        if (info->control.hw_key)
                return TX_CONTINUE;

        bip_aad(skb, aad);

        /*
         * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64)
         */
        ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
                           skb->data + 24, skb->len - 24, mmie->mic);

        return TX_CONTINUE;
}


ieee80211_tx_result
ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_mmie_16 *mmie;
        u8 aad[20];
        u64 pn64;

        if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
                return TX_DROP;

        skb = skb_peek(&tx->skbs);

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key)
                return TX_CONTINUE;

        if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
                return TX_DROP;

        mmie = skb_put(skb, sizeof(*mmie));
        mmie->element_id = WLAN_EID_MMIE;
        mmie->length = sizeof(*mmie) - 2;
        mmie->key_id = cpu_to_le16(key->conf.keyidx);

        /* PN = PN + 1 */
        pn64 = atomic64_inc_return(&key->conf.tx_pn);

        bip_ipn_set64(mmie->sequence_number, pn64);

        bip_aad(skb, aad);

        /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
         */
        ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
                               skb->data + 24, skb->len - 24, mmie->mic);

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
{
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_key *key = rx->key;
        struct ieee80211_mmie *mmie;
        u8 aad[20], mic[8], ipn[6];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_mgmt(hdr->frame_control))
                return RX_CONTINUE;

        /* management frames are already linear */

        if (skb->len < 24 + sizeof(*mmie))
                return RX_DROP_UNUSABLE;

        mmie = (struct ieee80211_mmie *)
                (skb->data + skb->len - sizeof(*mmie));
        if (mmie->element_id != WLAN_EID_MMIE ||
            mmie->length != sizeof(*mmie) - 2)
                return RX_DROP_UNUSABLE; /* Invalid MMIE */

        bip_ipn_swap(ipn, mmie->sequence_number);

        if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
                key->u.aes_cmac.replays++;
                return RX_DROP_UNUSABLE;
        }

        if (!(status->flag & RX_FLAG_DECRYPTED)) {
                /* hardware didn't decrypt/verify MIC */
                bip_aad(skb, aad);
                ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
                                   skb->data + 24, skb->len - 24, mic);
                if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_cmac.icverrors++;
                        return RX_DROP_UNUSABLE;
                }
        }

        memcpy(key->u.aes_cmac.rx_pn, ipn, 6);

        /* Remove MMIE */
        skb_trim(skb, skb->len - sizeof(*mmie));

        return RX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
{
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_key *key = rx->key;
        struct ieee80211_mmie_16 *mmie;
        u8 aad[20], mic[16], ipn[6];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_is_mgmt(hdr->frame_control))
                return RX_CONTINUE;

        /* management frames are already linear */

        if (skb->len < 24 + sizeof(*mmie))
                return RX_DROP_UNUSABLE;

        mmie = (struct ieee80211_mmie_16 *)
                (skb->data + skb->len - sizeof(*mmie));
        if (mmie->element_id != WLAN_EID_MMIE ||
            mmie->length != sizeof(*mmie) - 2)
                return RX_DROP_UNUSABLE; /* Invalid MMIE */

        bip_ipn_swap(ipn, mmie->sequence_number);

        if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
                key->u.aes_cmac.replays++;
                return RX_DROP_UNUSABLE;
        }

        if (!(status->flag & RX_FLAG_DECRYPTED)) {
                /* hardware didn't decrypt/verify MIC */
                bip_aad(skb, aad);
                ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
                                       skb->data + 24, skb->len - 24, mic);
                if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_cmac.icverrors++;
                        return RX_DROP_UNUSABLE;
                }
        }

        memcpy(key->u.aes_cmac.rx_pn, ipn, 6);

        /* Remove MMIE */
        skb_trim(skb, skb->len - sizeof(*mmie));

        return RX_CONTINUE;
}


ieee80211_tx_result
ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_mmie_16 *mmie;
        struct ieee80211_hdr *hdr;
        u8 aad[GMAC_AAD_LEN];
        u64 pn64;
        u8 nonce[GMAC_NONCE_LEN];

        if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
                return TX_DROP;

        skb = skb_peek(&tx->skbs);

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key)
                return TX_CONTINUE;

        if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
                return TX_DROP;

        mmie = skb_put(skb, sizeof(*mmie));
        mmie->element_id = WLAN_EID_MMIE;
        mmie->length = sizeof(*mmie) - 2;
        mmie->key_id = cpu_to_le16(key->conf.keyidx);

        /* PN = PN + 1 */
        pn64 = atomic64_inc_return(&key->conf.tx_pn);

        bip_ipn_set64(mmie->sequence_number, pn64);

        bip_aad(skb, aad);

        hdr = (struct ieee80211_hdr *)skb->data;
        memcpy(nonce, hdr->addr2, ETH_ALEN);
        bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number);

        /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */
        if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
                               skb->data + 24, skb->len - 24, mmie->mic) < 0)
                return TX_DROP;

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
{
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_key *key = rx->key;
        struct ieee80211_mmie_16 *mmie;
        u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_is_mgmt(hdr->frame_control))
                return RX_CONTINUE;

        /* management frames are already linear */

        if (skb->len < 24 + sizeof(*mmie))
                return RX_DROP_UNUSABLE;

        mmie = (struct ieee80211_mmie_16 *)
                (skb->data + skb->len - sizeof(*mmie));
        if (mmie->element_id != WLAN_EID_MMIE ||
            mmie->length != sizeof(*mmie) - 2)
                return RX_DROP_UNUSABLE; /* Invalid MMIE */

        bip_ipn_swap(ipn, mmie->sequence_number);

        if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) {
                key->u.aes_gmac.replays++;
                return RX_DROP_UNUSABLE;
        }

        if (!(status->flag & RX_FLAG_DECRYPTED)) {
                /* hardware didn't decrypt/verify MIC */
                bip_aad(skb, aad);

                memcpy(nonce, hdr->addr2, ETH_ALEN);
                memcpy(nonce + ETH_ALEN, ipn, 6);

                mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
                if (!mic)
                        return RX_DROP_UNUSABLE;
                if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
                                       skb->data + 24, skb->len - 24,
                                       mic) < 0 ||
                    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_gmac.icverrors++;
                        kfree(mic);
                        return RX_DROP_UNUSABLE;
                }
                kfree(mic);
        }

        memcpy(key->u.aes_gmac.rx_pn, ipn, 6);

        /* Remove MMIE */
        skb_trim(skb, skb->len - sizeof(*mmie));

        return RX_CONTINUE;
}


ieee80211_tx_result
ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *info = NULL;
        ieee80211_tx_result res;

        skb_queue_walk(&tx->skbs, skb) {
                info = IEEE80211_SKB_CB(skb);

                /* handle hw-only algorithm */
                if (!info->control.hw_key)
                        return TX_DROP;

                if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) {
                        res = ieee80211_crypto_cs_encrypt(tx, skb);
                        if (res != TX_CONTINUE)
                                return res;
                }
        }

        ieee80211_tx_set_protected(tx);

        return TX_CONTINUE;
}


ieee80211_rx_result
ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx)
{
        if (rx->sta && rx->sta->cipher_scheme)
                return ieee80211_crypto_cs_decrypt(rx);

        return RX_DROP_UNUSABLE;
}