// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 */

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"

#define WL18XX_LOGGER_SDIO_BUFF_MAX	(0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS	(0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR	(0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET	(sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET	(12)
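
/*
 * Read the firmware logger ring buffer over the bus, copy any pending log
 * data to the host fwlog buffer and advance the firmware's read pointer.
 * Returns the number of bytes the firmware reported as available.
 */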
int wlcore_event_fw_logger(struct wl1271 *wl)
{
        int ret;
        struct fw_logger_information fw_log;
        u8 *buffer;
        u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
        u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
        u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
                            WL18XX_LOGGER_BUFF_OFFSET;
        u32 available_len;
        u32 actual_len;
        u32 clear_addr;
        size_t len;
        u32 start_loc;

        buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
        if (!buffer) {
                wl1271_error("Fail to allocate fw logger memory");
                fw_log.actual_buff_size = cpu_to_le32(0);
                goto out;
        }

        ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
                          false);
        if (ret < 0) {
                wl1271_error("Fail to read logger buffer, error_id = %d",
                             ret);
                fw_log.actual_buff_size = cpu_to_le32(0);
                goto out;
        }

        memcpy(&fw_log, buffer, sizeof(fw_log));

        if (le32_to_cpu(fw_log.actual_buff_size) == 0)
                goto out;

        actual_len = le32_to_cpu(fw_log.actual_buff_size);
        start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
                     internal_fw_addrbase) - addr;
        end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
        available_len = end_buff_addr -
                        (le32_to_cpu(fw_log.buff_read_ptr) -
                         internal_fw_addrbase);
        actual_len = min(actual_len, available_len);
        len = actual_len;

        /* copy the chunk from the read pointer up to the end of the ring */
        wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
        clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
                     internal_fw_addrbase;

        /* copy the wrapped-around remainder, if any */
        len = le32_to_cpu(fw_log.actual_buff_size) - len;
        if (len) {
                wl12xx_copy_fwlog(wl,
                                  &buffer[WL18XX_LOGGER_BUFF_OFFSET],
                                  len);
                clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
                             internal_fw_addrbase;
        }

        /* double check that clear address and write pointer are the same */
        if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
                wl1271_error("Calculate of clear addr Clear = %x, write = %x",
                             clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
        }

        /* indicate FW about Clear buffer */
        ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
                             fw_log.buff_write_ptr);
out:
        kfree(buffer);

        return le32_to_cpu(fw_log.actual_buff_size);
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);

void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
        struct wl12xx_vif *wlvif;
        struct ieee80211_vif *vif;
        enum nl80211_cqm_rssi_threshold_event event;
        s8 metric = metric_arr[0];

        wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);

        /* TODO: check actual multi-role support */
        wl12xx_for_each_wlvif_sta(wl, wlvif) {
                if (metric <= wlvif->rssi_thold)
                        event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
                else
                        event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

                vif = wl12xx_wlvif_to_vif(wlvif);
                if (event != wlvif->last_rssi_event)
                        ieee80211_cqm_rssi_notify(vif, event, metric,
                                                  GFP_KERNEL);
                wlvif->last_rssi_event = event;
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
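
/* Tear down any active RX BA sessions on this vif, for both STA and AP roles. */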
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

        if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                u8 hlid = wlvif->sta.hlid;

                if (!wl->links[hlid].ba_bitmap)
                        return;

                ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
                                             vif->bss_conf.bssid);
        } else {
                u8 hlid;
                struct wl1271_link *lnk;

                for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
                                 wl->num_links) {
                        lnk = &wl->links[hlid];
                        if (!lnk->ba_bitmap)
                                continue;

                        ieee80211_stop_rx_ba_session(vif,
                                                     lnk->ba_bitmap,
                                                     lnk->addr);
                }
        }
}

void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
        struct wl12xx_vif *wlvif;

        if (enable) {
                set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
        } else {
                clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
                wl12xx_for_each_wlvif_sta(wl, wlvif) {
                        wl1271_recalc_rx_streaming(wl, wlvif);
                }
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);

void wlcore_event_sched_scan_completed(struct wl1271 *wl,
                                       u8 status)
{
        wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
                     status);

        if (wl->sched_vif) {
                ieee80211_sched_scan_stopped(wl->hw);
                wl->sched_vif = NULL;
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);

void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
                                   unsigned long roles_bitmap,
                                   unsigned long allowed_bitmap)
{
        struct wl12xx_vif *wlvif;

        wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
                     __func__, roles_bitmap, allowed_bitmap);

        wl12xx_for_each_wlvif(wl, wlvif) {
                if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
                    !test_bit(wlvif->role_id, &roles_bitmap))
                        continue;

                wlvif->ba_allowed = !!test_bit(wlvif->role_id,
                                               &allowed_bitmap);
                if (!wlvif->ba_allowed)
                        wl1271_stop_ba_event(wl, wlvif);
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
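
/* Complete a pending channel switch for every role set in roles_bitmap. */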
void wlcore_event_channel_switch(struct wl1271 *wl,
                                 unsigned long roles_bitmap,
                                 bool success)
{
        struct wl12xx_vif *wlvif;
        struct ieee80211_vif *vif;

        wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
                     __func__, roles_bitmap, success);

        wl12xx_for_each_wlvif(wl, wlvif) {
                if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
                    !test_bit(wlvif->role_id, &roles_bitmap))
                        continue;

                if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
                                        &wlvif->flags))
                        continue;

                vif = wl12xx_wlvif_to_vif(wlvif);

                if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
                        ieee80211_chswitch_done(vif, success);
                        cancel_delayed_work(&wlvif->channel_switch_work);
                } else {
                        set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
                        ieee80211_csa_finish(vif);
                }
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);

void wlcore_event_dummy_packet(struct wl1271 *wl)
{
        if (wl->plt) {
                wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
                return;
        }

        wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
        wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
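
/*
 * Report a low-ACK condition for every station in sta_bitmap; shared by the
 * MAX_TX_FAILURE and INACTIVE_STA event handlers.
 */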
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
        u32 num_packets = wl->conf.tx.max_tx_retries;
        struct wl12xx_vif *wlvif;
        struct ieee80211_vif *vif;
        struct ieee80211_sta *sta;
        const u8 *addr;
        int h;

        for_each_set_bit(h, &sta_bitmap, wl->num_links) {
                bool found = false;

                /* find the ap vif connected to this sta */
                wl12xx_for_each_wlvif_ap(wl, wlvif) {
                        if (!test_bit(h, wlvif->ap.sta_hlid_map))
                                continue;
                        found = true;
                        break;
                }
                if (!found)
                        continue;

                vif = wl12xx_wlvif_to_vif(wlvif);
                addr = wl->links[h].addr;

                rcu_read_lock();
                sta = ieee80211_find_sta(vif, addr);
                if (sta) {
                        wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
                        ieee80211_report_low_ack(sta, num_packets);
                }
                rcu_read_unlock();
        }
}

void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
        wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
        wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);

void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
        wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
        wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);

void wlcore_event_roc_complete(struct wl1271 *wl)
{
        wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
        if (wl->roc_vif)
                ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);

void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
        /*
         * We are HW_MONITOR device. On beacon loss - queue
         * connection loss work. Cancel it on REGAINED event.
         */
        struct wl12xx_vif *wlvif;
        struct ieee80211_vif *vif;
        int delay = wl->conf.conn.synch_fail_thold *
                    wl->conf.conn.bss_lose_timeout;

        wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

        wl12xx_for_each_wlvif_sta(wl, wlvif) {
                if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
                    !test_bit(wlvif->role_id, &roles_bitmap))
                        continue;

                vif = wl12xx_wlvif_to_vif(wlvif);

                /* don't attempt roaming in case of p2p */
                if (wlvif->p2p) {
                        ieee80211_connection_loss(vif);
                        continue;
                }

                /*
                 * if the work is already queued, it should take place.
                 * We don't want to delay the connection loss
                 * indication any more.
                 */
                ieee80211_queue_delayed_work(wl->hw,
                                             &wlvif->connection_loss_work,
                                             msecs_to_jiffies(delay));

                ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
        }
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);

int wl1271_event_unmask(struct wl1271 *wl)
{
        int ret;

        wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
        ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
        if (ret < 0)
                return ret;

        return 0;
}
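
/*
 * Read the event mailbox selected by mbox_num, have the chip-specific ops
 * process the events it contains and then ack the mailbox to the firmware.
 */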
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
        int ret;

        wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

        if (mbox_num > 1)
                return -EINVAL;

        /* first we read the mbox descriptor */
        ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
                          wl->mbox_size, false);
        if (ret < 0)
                return ret;

        /* process the descriptor */
        ret = wl->ops->process_mailbox_events(wl);
        if (ret < 0)
                return ret;

        /*
         * TODO: we just need this because one bit is in a different
         * place. Is there any better way?
         */
        ret = wl->ops->ack_event(wl);

        return ret;
}