/*
 * Intel Wireless Multicomm 3200 WiFi driver
 *
 * Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <ilw@linux.intel.com>
 * Samuel Ortiz <samuel.ortiz@intel.com>
 * Zhu Yi <yi.zhu@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/iw_handler.h>

#include "iwm.h"
#include "debug.h"
#include "hal.h"
#include "umac.h"
#include "lmac.h"
#include "commands.h"
#include "rx.h"
#include "cfg80211.h"
#include "eeprom.h"
static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr)
{
	if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) ||
	    (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL))
		return -EINVAL;

	return 0;
}

static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr)
{
	return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr),
		     16);
}

/*
 * Notification handlers:
 *
 * For every possible notification we can receive from the
 * target, we have a handler.
 * When we get a target notification, and there is no one
 * waiting for it, it's just processed through the rx code
 * path:
 *
 * iwm_rx_handle()
 *  -> iwm_rx_handle_umac()
 *      -> iwm_rx_handle_wifi()
 *           -> iwm_rx_handle_resp()
 *                -> iwm_ntf_*()
 *
 *      OR
 *
 *      -> iwm_rx_handle_non_wifi()
 *
 * If there are processes waiting for this notification, then
 * iwm_rx_handle_wifi() just wakes those processes up and they
 * grab the pending notification.
 */
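/*
 * Firmware error notification, for both LMAC and UMAC errors: keep a
 * copy of the error header in iwm->last_fw_err, dump its fields to the
 * log and schedule a device reset.
 */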
static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf,
			 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_error *error;
	struct iwm_fw_error_hdr *fw_err;

	error = (struct iwm_umac_notif_error *)buf;
	fw_err = &error->err;

	memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr));

	IWM_ERR(iwm, "%cMAC FW ERROR:\n",
		(le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U');
	IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category));
	IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status));
	IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc));
	IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1));
	IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2));
	IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1));
	IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2));
	IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1));
	IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2));
	IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num));
	IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status));
	IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status));
	IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status));

	iwm_resetting(iwm);

	return 0;
}

static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_alive *alive_resp =
			(struct iwm_umac_notif_alive *)(buf);
	u16 status = le16_to_cpu(alive_resp->status);

	if (status == UMAC_NTFY_ALIVE_STATUS_ERR) {
		IWM_ERR(iwm, "Receive error UMAC_ALIVE\n");
		return -EIO;
	}

	iwm_tx_credit_init_pools(iwm, alive_resp);

	return 0;
}

static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf,
				 unsigned long buf_size,
				 struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct iwm_umac_notif_init_complete *init_complete =
			(struct iwm_umac_notif_init_complete *)(buf);
	u16 status = le16_to_cpu(init_complete->status);
	bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR);

	if (blocked)
		IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n");
	else
		IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n");

	wiphy_rfkill_set_hw_state(wiphy, blocked);

	return 0;
}
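/*
 * UMAC page deallocation notification: the UMAC tells us which Tx page
 * pools it has freed pages from, so the corresponding Tx credits can be
 * given back through iwm_tx_credit_inc().
 */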
static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf,
				    unsigned long buf_size,
				    struct iwm_wifi_cmd *cmd)
{
	int pool_nr, total_freed_pages;
	unsigned long pool_map;
	int i, id;
	struct iwm_umac_notif_page_dealloc *dealloc =
			(struct iwm_umac_notif_page_dealloc *)buf;

	pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
	pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);

	IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
		   "update map 0x%lx\n", pool_nr, pool_map);

	spin_lock(&iwm->tx_credit.lock);

	for (i = 0; i < pool_nr; i++) {
		id = GET_VAL32(dealloc->grp_info[i],
			       UMAC_DEALLOC_NTFY_GROUP_NUM);
		if (test_bit(id, &pool_map)) {
			total_freed_pages = GET_VAL32(dealloc->grp_info[i],
						      UMAC_DEALLOC_NTFY_PAGE_CNT);
			iwm_tx_credit_inc(iwm, id, total_freed_pages);
		}
	}

	spin_unlock(&iwm->tx_credit.lock);

	return 0;
}

static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");

	return 0;
}

static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);

	return 0;
}

static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
		      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_lmac_tx_resp *tx_resp;
	struct iwm_umac_wifi_in_hdr *hdr;

	tx_resp = (struct iwm_lmac_tx_resp *)
		(buf + sizeof(struct iwm_umac_wifi_in_hdr));
	hdr = (struct iwm_umac_wifi_in_hdr *)buf;

	IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);

	IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n",
		   le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
	IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
	IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n",
		   le16_to_cpu(tx_resp->retry_cnt));
	IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
	IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n",
		   le16_to_cpu(tx_resp->byte_cnt));
	IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));

	return 0;
}
static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	u8 opcode;
	u8 *calib_buf;
	struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
				(buf + sizeof(struct iwm_umac_wifi_in_hdr));

	opcode = hdr->opcode;

	BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
	       opcode < PHY_CALIBRATE_OPCODES_NUM);

	IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
		    opcode);

	buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
	calib_buf = iwm->calib_res[opcode].buf;

	if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
		kfree(calib_buf);
		calib_buf = kzalloc(buf_size, GFP_KERNEL);
		if (!calib_buf) {
			IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
			return -ENOMEM;
		}
		iwm->calib_res[opcode].buf = calib_buf;
		iwm->calib_res[opcode].size = buf_size;
	}

	memcpy(calib_buf, hdr, buf_size);
	set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);

	return 0;
}

static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
				  unsigned long buf_size,
				  struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");

	return 0;
}

static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_lmac_cal_cfg_resp *cal_resp;

	cal_resp = (struct iwm_lmac_cal_cfg_resp *)
			(buf + sizeof(struct iwm_umac_wifi_in_hdr));

	IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
		    le32_to_cpu(cal_resp->status));

	return 0;
}

static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
			       unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_wifi_status *status =
			(struct iwm_umac_notif_wifi_status *)buf;

	iwm->core_enabled |= le16_to_cpu(status->status);

	return 0;
}

static struct iwm_rx_ticket_node *
iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
{
	struct iwm_rx_ticket_node *ticket_node;

	ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
	if (!ticket_node) {
		IWM_ERR(iwm, "Couldn't allocate ticket node\n");
		return ERR_PTR(-ENOMEM);
	}

	ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
				      GFP_KERNEL);
	if (!ticket_node->ticket) {
		IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
		kfree(ticket_node);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&ticket_node->node);

	return ticket_node;
}

static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
{
	kfree(ticket_node->ticket);
	kfree(ticket_node);
}
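/*
 * Look up an Rx packet by its sequence number in the per-hash packet
 * lists. A matching packet is unlinked from the list and returned to
 * the caller, which then owns it.
 */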
static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
{
	u8 id_hash = IWM_RX_ID_GET_HASH(id);
	struct iwm_rx_packet *packet;

	spin_lock(&iwm->packet_lock[id_hash]);
	list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
		if (packet->id == id) {
			list_del(&packet->node);
			spin_unlock(&iwm->packet_lock[id_hash]);
			return packet;
		}

	spin_unlock(&iwm->packet_lock[id_hash]);
	return NULL;
}

static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
						 u32 size, u16 id)
{
	struct iwm_rx_packet *packet;

	packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
	if (!packet) {
		IWM_ERR(iwm, "Couldn't allocate packet\n");
		return ERR_PTR(-ENOMEM);
	}

	packet->skb = dev_alloc_skb(size);
	if (!packet->skb) {
		IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
		kfree(packet);
		return ERR_PTR(-ENOMEM);
	}

	packet->pkt_size = size;

	skb_put(packet->skb, size);
	memcpy(packet->skb->data, buf, size);
	INIT_LIST_HEAD(&packet->node);
	packet->id = id;

	return packet;
}

void iwm_rx_free(struct iwm_priv *iwm)
{
	struct iwm_rx_ticket_node *ticket, *nt;
	struct iwm_rx_packet *packet, *np;
	int i;

	spin_lock(&iwm->ticket_lock);
	list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
		list_del(&ticket->node);
		iwm_rx_ticket_node_free(ticket);
	}
	spin_unlock(&iwm->ticket_lock);

	for (i = 0; i < IWM_RX_ID_HASH; i++) {
		spin_lock(&iwm->packet_lock[i]);
		list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
					 node) {
			list_del(&packet->node);
			kfree_skb(packet->skb);
			kfree(packet);
		}
		spin_unlock(&iwm->packet_lock[i]);
	}
}
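/*
 * Rx ticket notification from the UMAC: queue the RELEASE/DROP tickets
 * on iwm->rx_tickets and kick the rx worker, which matches them against
 * the pending Rx packets.
 */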
static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
		(struct iwm_umac_notif_rx_ticket *)buf;
	struct iwm_rx_ticket *ticket =
		(struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
	int i, schedule_rx = 0;

	for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
		struct iwm_rx_ticket_node *ticket_node;

		switch (le16_to_cpu(ticket->action)) {
		case IWM_RX_TICKET_RELEASE:
		case IWM_RX_TICKET_DROP:
			/* We can push the packet to the stack */
			ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
			if (IS_ERR(ticket_node))
				return PTR_ERR(ticket_node);

			IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
				   __le16_to_cpu(ticket->action) ==
						IWM_RX_TICKET_RELEASE ?
				   "RELEASE" : "DROP",
				   ticket->id);
			spin_lock(&iwm->ticket_lock);
			list_add_tail(&ticket_node->node, &iwm->rx_tickets);
			spin_unlock(&iwm->ticket_lock);

			/*
			 * We received an Rx ticket, most likely there's
			 * a packet pending for it, it's not worth going
			 * through the packet hash list to double check.
			 * Let's just fire the rx worker..
			 */
			schedule_rx = 1;

			break;

		default:
			IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
				ticket->action);
		}

		ticket++;
	}

	if (schedule_rx)
		queue_work(iwm->rx_wq, &iwm->rx_worker);

	return 0;
}

static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_wifi_in_hdr *wifi_hdr;
	struct iwm_rx_packet *packet;
	u16 id, buf_offset;
	u32 packet_size;
	u8 id_hash;

	IWM_DBG_RX(iwm, DBG, "\n");

	wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
	id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
	buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
	packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);

	IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
		   wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
	IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
	IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);

	packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
	if (IS_ERR(packet))
		return PTR_ERR(packet);

	id_hash = IWM_RX_ID_GET_HASH(id);
	spin_lock(&iwm->packet_lock[id_hash]);
	list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
	spin_unlock(&iwm->packet_lock[id_hash]);

	/* We might (unlikely) have received the packet _after_ the ticket */
	queue_work(iwm->rx_wq, &iwm->rx_worker);

	return 0;
}
/* MLME handlers */
static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_assoc_start *start;

	start = (struct iwm_umac_notif_assoc_start *)buf;

	IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
		     start->bssid, le32_to_cpu(start->roam_reason));

	wake_up_interruptible(&iwm->mlme_queue);

	return 0;
}

static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
{
	if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
	     iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
	    (iwm->umac_profile->sec.ucast_cipher ==
	     iwm->umac_profile->sec.mcast_cipher) &&
	    (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
		return 1;

	return 0;
}
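/*
 * Association complete notification. On success we remember the BSSID
 * and channel, bring the link up and report the result to cfg80211
 * (connect result, roaming or IBSS join). On failure we tear the link
 * down and, for open WEP profiles, retry with shared key authentication.
 */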
static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
				   unsigned long buf_size,
				   struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct ieee80211_channel *chan;
	struct iwm_umac_notif_assoc_complete *complete =
		(struct iwm_umac_notif_assoc_complete *)buf;

	IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
		     complete->bssid, complete->status);

	switch (le32_to_cpu(complete->status)) {
	case UMAC_ASSOC_COMPLETE_SUCCESS:
		chan = ieee80211_get_channel(wiphy,
			ieee80211_channel_to_frequency(complete->channel));
		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
			/* Associated to an unallowed channel, disassociate. */
			__iwm_invalidate_mlme_profile(iwm);
			IWM_WARN(iwm, "Couldn't associate with %pM because "
				 "channel %d is disabled. Check your local "
				 "regulatory setting.\n",
				 complete->bssid, complete->channel);
			goto failure;
		}

		set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
		memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
		iwm->channel = complete->channel;

		/* Internal roaming state, avoid notifying SME. */
		if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
		    && iwm->conf.mode == UMAC_MODE_BSS) {
			cancel_delayed_work(&iwm->disconnect);
			cfg80211_roamed(iwm_to_ndev(iwm),
					complete->bssid,
					iwm->req_ie, iwm->req_ie_len,
					iwm->resp_ie, iwm->resp_ie_len,
					GFP_KERNEL);
			break;
		}

		iwm_link_on(iwm);

		if (iwm->conf.mode == UMAC_MODE_IBSS)
			goto ibss;

		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
			cfg80211_connect_result(iwm_to_ndev(iwm),
						complete->bssid,
						iwm->req_ie, iwm->req_ie_len,
						iwm->resp_ie, iwm->resp_ie_len,
						WLAN_STATUS_SUCCESS,
						GFP_KERNEL);
		else
			cfg80211_roamed(iwm_to_ndev(iwm),
					complete->bssid,
					iwm->req_ie, iwm->req_ie_len,
					iwm->resp_ie, iwm->resp_ie_len,
					GFP_KERNEL);
		break;
	case UMAC_ASSOC_COMPLETE_FAILURE:
 failure:
		clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
		memset(iwm->bssid, 0, ETH_ALEN);
		iwm->channel = 0;

		/* Internal roaming state, avoid notifying SME. */
		if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
		    && iwm->conf.mode == UMAC_MODE_BSS) {
			cancel_delayed_work(&iwm->disconnect);
			break;
		}

		iwm_link_off(iwm);

		if (iwm->conf.mode == UMAC_MODE_IBSS)
			goto ibss;

		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
			if (!iwm_is_open_wep_profile(iwm)) {
				cfg80211_connect_result(iwm_to_ndev(iwm),
					complete->bssid,
					NULL, 0, NULL, 0,
					WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);
			} else {
				/* Let's try shared WEP auth */
				IWM_ERR(iwm, "Trying WEP shared auth\n");
				schedule_work(&iwm->auth_retry_worker);
			}
		else
			cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
					      GFP_KERNEL);
		break;
	default:
		break;
	}

	clear_bit(IWM_STATUS_RESETTING, &iwm->status);
	return 0;

 ibss:
	cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
	clear_bit(IWM_STATUS_RESETTING, &iwm->status);
	return 0;
}
static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
				       unsigned long buf_size,
				       struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_profile_invalidate *invalid;
	u32 reason;

	invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
	reason = le32_to_cpu(invalid->reason);

	IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n", reason);

	if (reason != UMAC_PROFILE_INVALID_REQUEST &&
	    test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status))
		cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL,
					0, WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);

	clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
	clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);

	iwm->umac_profile_active = 0;
	memset(iwm->bssid, 0, ETH_ALEN);
	iwm->channel = 0;

	iwm_link_off(iwm);

	wake_up_interruptible(&iwm->mlme_queue);

	return 0;
}

#define IWM_DISCONNECT_INTERVAL	(5 * HZ)

static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf,
					  unsigned long buf_size,
					  struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");

	schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL);

	return 0;
}
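/*
 * Scan complete notification: push the collected BSS entries to
 * cfg80211 and complete the pending scan request, flagging it as
 * aborted if the UMAC reported an aborted scan.
 */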
static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
				  unsigned long buf_size,
				  struct iwm_wifi_cmd *cmd)
{
	int ret;
	struct iwm_umac_notif_scan_complete *scan_complete =
		(struct iwm_umac_notif_scan_complete *)buf;
	u32 result = le32_to_cpu(scan_complete->result);

	IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
		     le32_to_cpu(scan_complete->type),
		     le32_to_cpu(scan_complete->result),
		     scan_complete->seq_num);

	if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
		IWM_ERR(iwm, "Scan complete while device not scanning\n");
		return -EIO;
	}
	if (!iwm->scan_request)
		return 0;

	ret = iwm_cfg80211_inform_bss(iwm);

	cfg80211_scan_done(iwm->scan_request,
			   (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret);
	iwm->scan_request = NULL;

	return ret;
}

static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
				     unsigned long buf_size,
				     struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_sta_info *umac_sta =
			(struct iwm_umac_notif_sta_info *)buf;
	struct iwm_sta_info *sta;
	int i;

	switch (le32_to_cpu(umac_sta->opcode)) {
	case UMAC_OPCODE_ADD_MODIFY:
		sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];

		IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
			     "addr = %pM, qos = %d\n",
			     sta->valid ? "Modify" : "Add",
			     GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
			     GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
			     umac_sta->mac_addr,
			     umac_sta->flags & UMAC_STA_FLAG_QOS);

		sta->valid = 1;
		sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
		sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
		memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
		break;
	case UMAC_OPCODE_REMOVE:
		IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
			     "addr = %pM\n",
			     GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
			     GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
			     umac_sta->mac_addr);

		sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];

		if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
			sta->valid = 0;

		break;
	case UMAC_OPCODE_CLEAR_ALL:
		for (i = 0; i < IWM_STA_TABLE_NUM; i++)
			iwm->sta_table[i].valid = 0;

		break;
	default:
		break;
	}

	return 0;
}

static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);

	IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");

	wiphy_rfkill_set_hw_state(wiphy, true);

	return 0;
}
static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
				     unsigned long buf_size,
				     struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct ieee80211_mgmt *mgmt;
	struct iwm_umac_notif_bss_info *umac_bss =
			(struct iwm_umac_notif_bss_info *)buf;
	struct ieee80211_channel *channel;
	struct ieee80211_supported_band *band;
	struct iwm_bss_info *bss;
	s32 signal;
	int freq;
	u16 frame_len = le16_to_cpu(umac_bss->frame_len);
	size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;

	mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);

	IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
	IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
	IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
		     le32_to_cpu(umac_bss->timestamp));
	IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
		     le16_to_cpu(umac_bss->table_idx));
	IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
	IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
	IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
	IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);

	list_for_each_entry(bss, &iwm->bss_list, node)
		if (bss->bss->table_idx == umac_bss->table_idx)
			break;

	if (&bss->node != &iwm->bss_list) {
		/* Remove the old BSS entry, we will add it back later. */
		list_del(&bss->node);
		kfree(bss->bss);
	} else {
		/* New BSS entry */

		bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
		if (!bss) {
			IWM_ERR(iwm, "Couldn't allocate bss_info\n");
			return -ENOMEM;
		}
	}

	bss->bss = kzalloc(bss_len, GFP_KERNEL);
	if (!bss->bss) {
		kfree(bss);
		IWM_ERR(iwm, "Couldn't allocate bss\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bss->node);
	memcpy(bss->bss, umac_bss, bss_len);

	if (umac_bss->band == UMAC_BAND_2GHZ)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else if (umac_bss->band == UMAC_BAND_5GHZ)
		band = wiphy->bands[IEEE80211_BAND_5GHZ];
	else {
		IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
		goto err;
	}

	freq = ieee80211_channel_to_frequency(umac_bss->channel);
	channel = ieee80211_get_channel(wiphy, freq);
	signal = umac_bss->rssi * 100;

	bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
						 mgmt, frame_len,
						 signal, GFP_KERNEL);
	if (!bss->cfg_bss)
		goto err;

	list_add_tail(&bss->node, &iwm->bss_list);

	return 0;
 err:
	kfree(bss->bss);
	kfree(bss);

	return -EINVAL;
}

static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
			       unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_bss_removed *bss_rm =
		(struct iwm_umac_notif_bss_removed *)buf;
	struct iwm_bss_info *bss, *next;
	u16 table_idx;
	int i;

	for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
		table_idx = le16_to_cpu(bss_rm->entries[i]) &
			    IWM_BSS_REMOVE_INDEX_MSK;
		list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
			if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
				struct ieee80211_mgmt *mgmt;

				mgmt = (struct ieee80211_mgmt *)
					(bss->bss->frame_buf);
				IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
					     mgmt->bssid);
				list_del(&bss->node);
				kfree(bss->bss);
				kfree(bss);
			}
	}

	return 0;
}
static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_mgt_frame *mgt_frame =
			(struct iwm_umac_notif_mgt_frame *)buf;
	struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;

	IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
		    le16_to_cpu(mgt_frame->len));

	if (ieee80211_is_assoc_req(mgt->frame_control)) {
		iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
				  - offsetof(struct ieee80211_mgmt,
					     u.assoc_req.variable);
		kfree(iwm->req_ie);
		iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
				      iwm->req_ie_len, GFP_KERNEL);
	} else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
		iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
				  - offsetof(struct ieee80211_mgmt,
					     u.reassoc_req.variable);
		kfree(iwm->req_ie);
		iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
				      iwm->req_ie_len, GFP_KERNEL);
	} else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
		iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
				   - offsetof(struct ieee80211_mgmt,
					      u.assoc_resp.variable);
		kfree(iwm->resp_ie);
		iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
				       iwm->resp_ie_len, GFP_KERNEL);
	} else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
		iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
				   - offsetof(struct ieee80211_mgmt,
					      u.reassoc_resp.variable);
		kfree(iwm->resp_ie);
		iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
				       iwm->resp_ie_len, GFP_KERNEL);
	} else {
		IWM_ERR(iwm, "Unsupported management frame: 0x%x",
			le16_to_cpu(mgt->frame_control));
		return 0;
	}

	return 0;
}

static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
			unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_wifi_if *notif =
		(struct iwm_umac_notif_wifi_if *)buf;

	switch (notif->status) {
	case WIFI_IF_NTFY_ASSOC_START:
		return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_ASSOC_COMPLETE:
		return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
		return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_CONNECTION_TERMINATED:
		return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_SCAN_COMPLETE:
		return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_STA_TABLE_CHANGE:
		return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
		IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
		break;
	case WIFI_IF_NTFY_RADIO_PREEMPTION:
		return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
		return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
		return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
	case WIFI_IF_NTFY_MGMT_FRAME:
		return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
	case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
	case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
	case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
	case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
	case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
	case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
	case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
	case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
	case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
	case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
		IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
			     notif->status);
		break;
	default:
		IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status);
		break;
	}

	return 0;
}

#define IWM_STATS_UPDATE_INTERVAL	(2 * HZ)
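/*
 * Periodic statistics notification: derive the current bit rate and Tx
 * power, fill the wireless extensions statistics (iwm->wstats), then
 * re-arm both the statistics request work and the device watchdog.
 */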
static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
	struct iw_statistics *wstats = &iwm->wstats;
	u16 max_rate = 0;
	int i;

	IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");

	if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
			max_rate = max_t(u16, max_rate,
					 max(le16_to_cpu(stats->tx_rate[i]),
					     le16_to_cpu(stats->rx_rate[i])));
		}
		/* UMAC passes rate info multiplied by 2 */
		iwm->rate = max_rate >> 1;
	}
	iwm->txpower = le32_to_cpu(stats->tx_power);

	wstats->status = 0;

	wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
	wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
	wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
	wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);

	wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);

	/* according to cfg80211 */
	if (stats->rssi_dbm < -110)
		wstats->qual.qual = 0;
	else if (stats->rssi_dbm > -40)
		wstats->qual.qual = 70;
	else
		wstats->qual.qual = stats->rssi_dbm + 110;

	wstats->qual.level = stats->rssi_dbm;
	wstats->qual.noise = stats->noise_dbm;
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;

	schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);

	mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));

	return 0;
}

static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
		(struct iwm_umac_cmd_eeprom_proxy *)
		(buf + sizeof(struct iwm_umac_wifi_in_hdr));
	struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
	u32 hdr_offset = le32_to_cpu(hdr->offset);
	u32 hdr_len = le32_to_cpu(hdr->len);
	u32 hdr_type = le32_to_cpu(hdr->type);

	IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
		    hdr_type, hdr_len, hdr_offset);

	if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
		return -EINVAL;

	switch (hdr_type) {
	case IWM_UMAC_CMD_EEPROM_TYPE_READ:
		memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
		break;
	case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
	default:
		return -ENOTSUPP;
	}

	return 0;
}

static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
				     unsigned long buf_size,
				     struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_cmd_get_channel_list *ch_list =
			(struct iwm_umac_cmd_get_channel_list *)
			(buf + sizeof(struct iwm_umac_wifi_in_hdr));
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct ieee80211_supported_band *band;
	int i;

	band = wiphy->bands[IEEE80211_BAND_2GHZ];

	for (i = 0; i < band->n_channels; i++) {
		unsigned long ch_mask_0 =
			le32_to_cpu(ch_list->ch[0].channels_mask);
		unsigned long ch_mask_2 =
			le32_to_cpu(ch_list->ch[2].channels_mask);

		if (!test_bit(i, &ch_mask_0))
			band->channels[i].flags |= IEEE80211_CHAN_DISABLED;

		if (!test_bit(i, &ch_mask_2))
			band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
	}

	band = wiphy->bands[IEEE80211_BAND_5GHZ];

	for (i = 0; i < min(band->n_channels, 32); i++) {
		unsigned long ch_mask_1 =
			le32_to_cpu(ch_list->ch[1].channels_mask);
		unsigned long ch_mask_3 =
			le32_to_cpu(ch_list->ch[3].channels_mask);

		if (!test_bit(i, &ch_mask_1))
			band->channels[i].flags |= IEEE80211_CHAN_DISABLED;

		if (!test_bit(i, &ch_mask_3))
			band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
	}

	return 0;
}
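/*
 * Stop/resume Tx notification for one station and a bitmask of TIDs.
 * The stopped state is recorded per TID; on resume the corresponding
 * Tx queues are kicked again, and only the stop case is acknowledged
 * back to the UMAC.
 */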
static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
				  unsigned long buf_size,
				  struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
		(struct iwm_umac_notif_stop_resume_tx *)buf;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
	u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
	int bit, ret = 0;
	bool stop = false;

	IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
		    "\tflags: 0x%x\n"
		    "\tSTA id: %d\n"
		    "\tTID bitmask: 0x%x\n",
		    stp_res_tx->flags, stp_res_tx->sta_id,
		    stp_res_tx->stop_resume_tid_msk);

	if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
		stop = true;

	sta_info = &iwm->sta_table[sta_id];
	if (!sta_info->valid) {
		IWM_ERR(iwm, "Stopping an invalid STA: %d %d\n",
			sta_id, stp_res_tx->sta_id);
		return -EINVAL;
	}

	for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
		tid_info = &sta_info->tid_info[bit];

		mutex_lock(&tid_info->mutex);
		tid_info->stopped = stop;
		mutex_unlock(&tid_info->mutex);

		if (!stop) {
			struct iwm_tx_queue *txq;
			int queue = iwm_tid_to_queue(bit);

			if (queue < 0)
				continue;

			txq = &iwm->txq[queue];
			/*
			 * If we resume, we have to move our SKBs
			 * back to the tx queue and queue some work.
			 */
			spin_lock_bh(&txq->lock);
			skb_queue_splice_init(&txq->queue, &txq->stopped_queue);
			spin_unlock_bh(&txq->lock);

			queue_work(txq->wq, &txq->worker);
		}
	}

	/* We send an ACK only for the stop case */
	if (stop)
		ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);

	return ret;
}

static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
				   unsigned long buf_size,
				   struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_wifi_if *hdr;

	if (cmd == NULL) {
		IWM_ERR(iwm, "Couldn't find expected wifi command\n");
		return -EINVAL;
	}

	hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;

	IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
		    "oid is 0x%x\n", hdr->oid);

	set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
	wake_up_interruptible(&iwm->wifi_ntfy_queue);

	switch (hdr->oid) {
	case UMAC_WIFI_IF_CMD_SET_PROFILE:
		iwm->umac_profile_active = 1;
		break;
	default:
		break;
	}

	return 0;
}

#define CT_KILL_DELAY (30 * HZ)
static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
				(buf + sizeof(struct iwm_umac_wifi_in_hdr));
	u32 flags = le32_to_cpu(state->flags);

	IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n",
		 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
		 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");

	if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
		/*
		 * We got a CTKILL event: We bring the interface down in
		 * order to cool the device down, and try to bring it up
		 * 30 seconds later. If it's still too hot, we'll go through
		 * this code path again.
		 */
		cancel_delayed_work_sync(&iwm->ct_kill_delay);
		schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
	}

	wiphy_rfkill_set_hw_state(wiphy, flags &
				  (IWM_CARD_STATE_HW_DISABLED |
				   IWM_CARD_STATE_CTKILL_DISABLED));

	return 0;
}
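/*
 * Handle a single UMAC wifi frame: validate the source and the byte
 * count, look up the pending command for this sequence number, then
 * either defer the buffer through iwm_notif_send() when a notification
 * handler is registered for this command ID, or call the handler
 * synchronously via iwm_rx_handle_resp().
 */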
static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size)
{
	struct iwm_umac_wifi_in_hdr *wifi_hdr;
	struct iwm_wifi_cmd *cmd;
	u8 source, cmd_id;
	u16 seq_num;
	u32 count;

	wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
	cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
	source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
	if (source >= IWM_SRC_NUM) {
		IWM_CRIT(iwm, "invalid source %d\n", source);
		return -EINVAL;
	}

	if (cmd_id == REPLY_RX_MPDU_CMD)
		trace_iwm_rx_packet(iwm, buf, buf_size);
	else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
		 (source == UMAC_HDI_IN_SOURCE_FW))
		trace_iwm_rx_ticket(iwm, buf, buf_size);
	else
		trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);

	count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
	count += sizeof(struct iwm_umac_wifi_in_hdr) -
		 sizeof(struct iwm_dev_cmd_hdr);
	if (count > buf_size) {
		IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
		return -EINVAL;
	}

	seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);

	IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
		   cmd_id, source, seq_num);

	/*
	 * If this is a response to a previously sent command, there must
	 * be a pending command for this sequence number.
	 */
	cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);

	/* Notify the caller only for sync commands. */
	switch (source) {
	case UMAC_HDI_IN_SOURCE_FHRX:
		if (iwm->lmac_handlers[cmd_id] &&
		    test_bit(cmd_id, &iwm->lmac_handler_map[0]))
			return iwm_notif_send(iwm, cmd, cmd_id, source,
					      buf, count);
		break;
	case UMAC_HDI_IN_SOURCE_FW:
		if (iwm->umac_handlers[cmd_id] &&
		    test_bit(cmd_id, &iwm->umac_handler_map[0]))
			return iwm_notif_send(iwm, cmd, cmd_id, source,
					      buf, count);
		break;
	case UMAC_HDI_IN_SOURCE_UDMA:
		break;
	}

	return iwm_rx_handle_resp(iwm, buf, count, cmd);
}

int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
		       struct iwm_wifi_cmd *cmd)
{
	u8 source, cmd_id;
	struct iwm_umac_wifi_in_hdr *wifi_hdr;
	int ret = 0;

	wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
	cmd_id = wifi_hdr->sw_hdr.cmd.cmd;

	source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);

	IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);

	switch (source) {
	case UMAC_HDI_IN_SOURCE_FHRX:
		if (iwm->lmac_handlers[cmd_id])
			ret = iwm->lmac_handlers[cmd_id]
					(iwm, buf, buf_size, cmd);
		break;
	case UMAC_HDI_IN_SOURCE_FW:
		if (iwm->umac_handlers[cmd_id])
			ret = iwm->umac_handlers[cmd_id]
					(iwm, buf, buf_size, cmd);
		break;
	case UMAC_HDI_IN_SOURCE_UDMA:
		ret = -EINVAL;
		break;
	}

	kfree(cmd);

	return ret;
}

static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
				 unsigned long buf_size)
{
	u8 seq_num;
	struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
	struct iwm_nonwifi_cmd *cmd;

	trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
	seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);

	/*
	 * We received a non wifi answer.
	 * Let's check if there's a pending command for it, and if so
	 * replace the command payload with the buffer, and then wake the
	 * callers up.
	 * That means we only support synchronised non wifi command response
	 * schemes.
	 */
	list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
		if (cmd->seq_num == seq_num) {
			cmd->resp_received = 1;
			cmd->buf.len = buf_size;
			memcpy(cmd->buf.hdr, buf, buf_size);
			wake_up_interruptible(&iwm->nonwifi_queue);
		}

	return 0;
}
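/*
 * Split a bus frame into its UDMA sub-frames and dispatch each of them
 * to the wifi or non-wifi path depending on its opcode. See the comment
 * inside the function for the UDMA encapsulation rules.
 */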
static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size)
{
	int ret = 0;
	u8 op_code;
	unsigned long buf_offset = 0;
	struct iwm_udma_in_hdr *hdr;

	/*
	 * To allow for a more efficient bus usage, UMAC
	 * messages are encapsulated into UDMA ones. This
	 * way we can have several UMAC messages in one bus
	 * transfer.
	 * A UDMA frame size is always aligned on 16 bytes,
	 * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
	 * word. This is how we parse a bus frame into several
	 * UDMA ones.
	 */
	while (buf_offset < buf_size) {

		hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);

		if (iwm_rx_check_udma_hdr(hdr) < 0) {
			IWM_DBG_RX(iwm, DBG, "End of frame\n");
			break;
		}

		op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);

		IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);

		if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
			ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
						  buf_size - buf_offset);
		} else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
			if (GET_VAL32(hdr->cmd,
				      UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
			    UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
				IWM_ERR(iwm, "Incorrect hw signature\n");
				return -EINVAL;
			}
			ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
						     buf_size - buf_offset);
		} else {
			IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
			ret |= -EINVAL;
		}

		buf_offset += iwm_rx_resp_size(hdr);
	}

	return ret;
}

int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
{
	struct iwm_udma_in_hdr *hdr;

	hdr = (struct iwm_udma_in_hdr *)buf;

	switch (le32_to_cpu(hdr->cmd)) {
	case UMAC_REBOOT_BARKER:
		if (test_bit(IWM_STATUS_READY, &iwm->status)) {
			IWM_ERR(iwm, "Unexpected BARKER\n");

			schedule_work(&iwm->reset_worker);

			return 0;
		}

		return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
				      IWM_SRC_UDMA, buf, buf_size);
	case UMAC_ACK_BARKER:
		return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
				      IWM_SRC_UDMA, NULL, 0);
	default:
		IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
		return iwm_rx_handle_umac(iwm, buf, buf_size);
	}

	return 0;
}

static const iwm_handler iwm_umac_handlers[] =
{
	[UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error,
	[UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive,
	[UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete,
	[UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
	[UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme,
	[UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update,
	[UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket,
	[UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset,
	[UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics,
	[UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy,
	[UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list,
	[UMAC_CMD_OPCODE_STOP_RESUME_STA_TX] = iwm_ntf_stop_resume_tx,
	[REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
	[UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper,
};

static const iwm_handler iwm_lmac_handlers[] =
{
	[REPLY_TX] = iwm_ntf_tx,
	[REPLY_ALIVE] = iwm_ntf_lmac_version,
	[CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res,
	[CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete,
	[CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg,
	[REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
	[CARD_STATE_NOTIFICATION] = iwm_ntf_card_state,
};

void iwm_rx_setup_handlers(struct iwm_priv *iwm)
{
	iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
	iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
}

static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
{
	struct ieee80211_hdr *hdr;
	unsigned int hdr_len;

	hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	if (hdr_total_len <= hdr_len)
		return;

	memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
	skb_pull(skb, (hdr_total_len - hdr_len));
}
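/*
 * Trim an Rx packet according to its ticket: strip the MPDU header and
 * the padding/tail bytes the ticket describes, remove the IV of
 * protected frames and drop the QoS control field, so the frame can be
 * handed to ieee80211_data_to_8023() later on.
 */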
static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
				 struct iwm_rx_packet *packet,
				 struct iwm_rx_ticket_node *ticket_node)
{
	u32 payload_offset = 0, payload_len;
	struct iwm_rx_ticket *ticket = ticket_node->ticket;
	struct iwm_rx_mpdu_hdr *mpdu_hdr;
	struct ieee80211_hdr *hdr;

	mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
	payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
	/* Padding is 0 or 2 bytes */
	payload_len = le16_to_cpu(mpdu_hdr->len) +
		      (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
	payload_len -= ticket->tail_len;

	IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
		   "ticket offset:%d ticket tail len:%d\n",
		   payload_len, payload_offset, ticket->payload_offset,
		   ticket->tail_len);

	IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);

	skb_pull(packet->skb, payload_offset);
	skb_trim(packet->skb, payload_len);

	iwm_remove_iv(packet->skb, ticket->payload_offset);

	hdr = (struct ieee80211_hdr *) packet->skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		/* UMAC handed QOS_DATA frame with 2 padding bytes appended
		 * to the qos_ctl field in IEEE 802.11 headers. */
		memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2,
			packet->skb->data,
			ieee80211_hdrlen(hdr->frame_control) -
			IEEE80211_QOS_CTL_LEN);
		hdr = (struct ieee80211_hdr *) skb_pull(packet->skb,
				IEEE80211_QOS_CTL_LEN + 2);
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
	}

	IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ",
		    packet->skb->data, packet->skb->len);
}

static void classify8023(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK;
	} else {
		skb->priority = 0;
	}
}

static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
{
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct net_device *ndev = iwm_to_ndev(iwm);
	struct sk_buff_head list;
	struct sk_buff *frame;

	IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);

	__skb_queue_head_init(&list);
	ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);

	while ((frame = __skb_dequeue(&list))) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += frame->len;

		frame->protocol = eth_type_trans(frame, ndev);
		frame->ip_summed = CHECKSUM_NONE;
		memset(frame->cb, 0, sizeof(frame->cb));

		if (netif_rx_ni(frame) == NET_RX_DROP) {
			IWM_ERR(iwm, "Packet dropped\n");
			ndev->stats.rx_dropped++;
		}
	}
}

static void iwm_rx_process_packet(struct iwm_priv *iwm,
				  struct iwm_rx_packet *packet,
				  struct iwm_rx_ticket_node *ticket_node)
{
	int ret;
	struct sk_buff *skb = packet->skb;
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct net_device *ndev = iwm_to_ndev(iwm);

	IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id);

	switch (le16_to_cpu(ticket_node->ticket->action)) {
	case IWM_RX_TICKET_RELEASE:
		IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");

		iwm_rx_adjust_packet(iwm, packet, ticket_node);
		skb->dev = iwm_to_ndev(iwm);
		classify8023(skb);

		if (le16_to_cpu(ticket_node->ticket->flags) &
		    IWM_RX_TICKET_AMSDU_MSK) {
			iwm_rx_process_amsdu(iwm, skb);
			break;
		}

		ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
		if (ret < 0) {
			IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
				   "%d\n", ret);
			kfree_skb(packet->skb);
			break;
		}

		IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;

		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;
		memset(skb->cb, 0, sizeof(skb->cb));

		if (netif_rx_ni(skb) == NET_RX_DROP) {
			IWM_ERR(iwm, "Packet dropped\n");
			ndev->stats.rx_dropped++;
		}
		break;
	case IWM_RX_TICKET_DROP:
		IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
			   le16_to_cpu(ticket_node->ticket->flags));
		kfree_skb(packet->skb);
		break;
	default:
		IWM_ERR(iwm, "Unknown ticket action: %d\n",
			le16_to_cpu(ticket_node->ticket->action));
		kfree_skb(packet->skb);
	}

	kfree(packet);
	iwm_rx_ticket_node_free(ticket_node);
}

/*
 * Rx data processing:
 *
 * We're receiving Rx packet from the LMAC, and Rx ticket from
 * the UMAC.
 * To forward a target data packet upstream (i.e. to the
 * kernel network stack), we must have received an Rx ticket
 * that tells us we're allowed to release this packet (ticket
 * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates,
 * among other things, where valid data actually starts in the Rx
 * packet.
 */
void iwm_rx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_rx_ticket_node *ticket, *next;

	iwm = container_of(work, struct iwm_priv, rx_worker);

	/*
	 * We go through the tickets list and if there is a pending
	 * packet for it, we push it upstream.
	 * We stop whenever a ticket is missing its packet, as we're
	 * supposed to send the packets in order.
	 */
	spin_lock(&iwm->ticket_lock);
	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
		struct iwm_rx_packet *packet =
			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));

		if (!packet) {
			IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
				   "to be handled first\n",
				   le16_to_cpu(ticket->ticket->id));
			break;
		}

		list_del(&ticket->node);
		iwm_rx_process_packet(iwm, packet, ticket);
	}
	spin_unlock(&iwm->ticket_lock);
}