linux-2.6/verdex.git: drivers/net/wireless/rt2x00/rt2x00crypto.c
/*
        Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 crypto specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
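
/*
 * Translate a mac80211 key configuration into the rt2x00 hardware cipher
 * enum: WEP keys are split by key length into WEP64/WEP128, TKIP and CCMP
 * map to the TKIP/AES ciphers, and anything else falls back to CIPHER_NONE.
 */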
enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
{
        switch (key->alg) {
        case ALG_WEP:
                if (key->keylen == WLAN_KEY_LEN_WEP40)
                        return CIPHER_WEP64;
                else
                        return CIPHER_WEP128;
        case ALG_TKIP:
                return CIPHER_TKIP;
        case ALG_CCMP:
                return CIPHER_AES;
        default:
                return CIPHER_NONE;
        }
}
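
/*
 * Fill the crypto related fields of a TX descriptor. This is a no-op unless
 * the device supports hardware crypto and mac80211 attached a hardware key
 * to this frame. Otherwise the descriptor records the cipher, key index and
 * IV offset/length, and flags whether the device must generate the IV and
 * the Michael MIC itself (i.e. when mac80211 does not generate them).
 */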
void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
                                       struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;

        if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !hw_key)
                return;

        __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);

        txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);

        if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
                __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);

        txdesc->key_idx = hw_key->hw_key_idx;
        txdesc->iv_offset = txdesc->header_length;
        txdesc->iv_len = hw_key->iv_len;

        if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);

        if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
                __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
}
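
/*
 * Report how many bytes of crypto overhead (IV/EIV, ICV, MMIC) the hardware
 * will add to this frame. The ICV is always counted; the IV/EIV and the
 * 8 byte TKIP Michael MIC are only counted when mac80211 leaves their
 * generation to the hardware.
 */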
unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
                                      struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *key = tx_info->control.hw_key;
        unsigned int overhead = 0;

        if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !key)
                return overhead;

        /*
         * Extend frame length to include IV/EIV/ICV/MMIC;
         * note that these lengths should only be added when
         * mac80211 does not generate them itself.
         */
        overhead += key->icv_len;

        if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                overhead += key->iv_len;

        if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
                if (key->alg == ALG_TKIP)
                        overhead += 8;
        }

        return overhead;
}
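
/*
 * Copy the IV/EIV from the frame into the skb descriptor without modifying
 * the frame itself, so the data remains available to the driver afterwards.
 */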
void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (unlikely(!txdesc->iv_len))
                return;

        /* Copy IV/EIV data */
        memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
}
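
/*
 * Strip the IV/EIV from the frame: save it in the skb descriptor, move the
 * 802.11 header over the freed space, pull the skb to its new size and mark
 * the descriptor with SKBDESC_IV_STRIPPED so the counterpart
 * rt2x00crypto_tx_insert_iv() can restore it later.
 */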
void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (unlikely(!txdesc->iv_len))
                return;

        /* Copy IV/EIV data */
        memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);

        /* Move ieee80211 header */
        memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset);

        /* Pull buffer to correct size */
        skb_pull(skb, txdesc->iv_len);

        /* IV/EIV data has officially been stripped */
        skbdesc->flags |= SKBDESC_IV_STRIPPED;
}
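
/*
 * Reinsert an IV/EIV that was stripped by rt2x00crypto_tx_remove_iv(). The
 * length is derived from the saved data: each non-zero word in skbdesc->iv[]
 * accounts for 4 bytes (the IV and the EIV respectively). Nothing is done
 * when SKBDESC_IV_STRIPPED is not set.
 */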
void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
        const unsigned int iv_len =
            ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);

        if (!(skbdesc->flags & SKBDESC_IV_STRIPPED))
                return;

        skb_push(skb, iv_len);

        /* Move ieee80211 header */
        memmove(skb->data, skb->data + iv_len, header_length);

        /* Copy IV/EIV data */
        memcpy(skb->data + header_length, skbdesc->iv, iv_len);

        /* IV/EIV data has returned into the frame */
        skbdesc->flags &= ~SKBDESC_IV_STRIPPED;
}
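
/*
 * Rebuild a received frame so the IV/EIV and ICV reported in the RX
 * descriptor appear in the frame body again, taking care of the alignment
 * and L2 padding handling described below. Once done the frame no longer
 * needs RX_FLAG_IV_STRIPPED, so that flag is cleared.
 */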
void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
                               unsigned int header_length,
                               struct rxdone_entry_desc *rxdesc)
{
        unsigned int payload_len = rxdesc->size - header_length;
        unsigned int align = ALIGN_SIZE(skb, header_length);
        unsigned int iv_len;
        unsigned int icv_len;
        unsigned int transfer = 0;

        /*
         * WEP64/WEP128: Provides IV & ICV
         * TKIP: Provides IV/EIV & ICV
         * AES: Provides IV/EIV & ICV
         */
        switch (rxdesc->cipher) {
        case CIPHER_WEP64:
        case CIPHER_WEP128:
                iv_len = 4;
                icv_len = 4;
                break;
        case CIPHER_TKIP:
                iv_len = 8;
                icv_len = 4;
                break;
        case CIPHER_AES:
                iv_len = 8;
                icv_len = 8;
                break;
        default:
                /* Unsupported cipher type */
                return;
        }

        /*
         * Make room for new data. There are 2 possibilities:
         * either the alignment is already present between
         * the 802.11 header and payload. In that case we
         * have to move the header by less than iv_len,
         * since we can use the already available l2pad bytes
         * for the iv data.
         * When the alignment must be added manually we must
         * move the header by more than iv_len, since we must
         * make room for the payload move as well.
         */
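
        /*
         * Rough sketch of both cases (not to scale):
         *
         *   RXDONE_L2PAD set:   [hdr][l2pad][payload]  ->  [hdr][iv/eiv][payload][icv]
         *   RXDONE_L2PAD clear: [hdr][payload]         ->  [hdr][iv/eiv][payload][icv]
         *
         * With L2 padding the pad bytes absorb part of the IV/EIV space, so
         * the header only moves by (iv_len - align) and the payload stays in
         * place; without it the header moves by (iv_len + align) and the
         * payload is moved separately further below.
         */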
        if (rxdesc->dev_flags & RXDONE_L2PAD) {
                skb_push(skb, iv_len - align);
                skb_put(skb, icv_len);

                /* Move ieee80211 header */
                memmove(skb->data + transfer,
                        skb->data + transfer + (iv_len - align),
                        header_length);
                transfer += header_length;
        } else {
                skb_push(skb, iv_len + align);
                if (align < icv_len)
                        skb_put(skb, icv_len - align);
                else if (align > icv_len)
                        skb_trim(skb, rxdesc->size + iv_len + icv_len);

                /* Move ieee80211 header */
                memmove(skb->data + transfer,
                        skb->data + transfer + iv_len + align,
                        header_length);
                transfer += header_length;
        }

        /* Copy IV/EIV data */
        memcpy(skb->data + transfer, rxdesc->iv, iv_len);
        transfer += iv_len;

        /*
         * Move payload for alignment purposes. Note that
         * this is only needed when no l2 padding is present.
         */
        if (!(rxdesc->dev_flags & RXDONE_L2PAD)) {
                memmove(skb->data + transfer,
                        skb->data + transfer + align,
                        payload_len);
        }

        /*
         * NOTE: Always count the payload as transferred,
         * even when alignment was set to zero. This is required
         * for determining the correct offset for the ICV data.
         */
        transfer += payload_len;

        /*
         * Copy ICV data.
         * AES appends 8 bytes, we can't fill the upper
         * 4 bytes, but mac80211 doesn't care about what
         * we provide here anyway and strips it immediately.
         */
        memcpy(skb->data + transfer, &rxdesc->icv, 4);
        transfer += icv_len;

        /* IV/EIV/ICV has been inserted into frame */
        rxdesc->size = transfer;
        rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
}