drivers/net/wireless/ath/ath9k/wow.c

/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
#include "hw-ops.h"

const char *ath9k_hw_wow_event_to_string(u32 wow_event)
{
        if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
                return "Magic pattern";
        if (wow_event & AH_WOW_USER_PATTERN_EN)
                return "User pattern";
        if (wow_event & AH_WOW_LINK_CHANGE)
                return "Link change";
        if (wow_event & AH_WOW_BEACON_MISS)
                return "Beacon miss";

        return "unknown reason";
}
EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
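
/*
 * Prepare the MAC for WoW sleep: force power-save mode, disable Rx and wait
 * for Rx DMA to stop, then arm the RTC to wake on interrupt only
 * (AR_RTC_FORCE_WAKE_ON_INT).
 */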
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

        /* set rx disable bit */
        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
                ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
                return;
        }

        REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
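
/*
 * Build the keep-alive frame that the hardware transmits on its own while the
 * host sleeps, so the association with the AP is kept up. The Tx descriptor
 * words and the frame contents are written into the chip's dedicated WoW
 * keep-alive transmit buffer.
 */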
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
        u32 ctl[13] = {0};
        u32 data_word[KAL_NUM_DATA_WORDS];
        u8 i;
        u32 wow_ka_data_word0;

        memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
        memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);

        /* set the transmit buffer */
        ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
        ctl[1] = 0;
        ctl[3] = 0xb;           /* OFDM_6M hardware value for this rate */
        ctl[4] = 0;
        ctl[7] = (ah->txchainmask) << 2;
        ctl[2] = 0xf << 16;     /* tx_tries 0 */

        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

        REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
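
        /*
         * The words below pack the 802.11 header of the keep-alive frame in
         * little-endian 32-bit words: frame control and duration first, then
         * addr1 = BSSID, addr2 = our MAC address, addr3 = BSSID.
         */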
        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
        data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
                       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
        data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
                       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
        data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);

        if (AR_SREV_9462_20(ah)) {
                /* AR9462 2.0 has an extra descriptor word (time based
                 * discard) compared to other chips */
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
                wow_ka_data_word0 = AR_WOW_TXBUF(13);
        } else {
                wow_ka_data_word0 = AR_WOW_TXBUF(12);
        }

        for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
                REG_WRITE(ah, (wow_ka_data_word0 + i * 4), data_word[i]);
}

void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
                                u8 *user_mask, int pattern_count,
                                int pattern_len)
{
        int i;
        u32 pattern_val, mask_val;
        u32 set, clr;

        /* FIXME: should check count by querying the hardware capability */
        if (pattern_count >= MAX_NUM_PATTERN)
                return;

        REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));

        /* set the registers for pattern */
        for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
                memcpy(&pattern_val, user_pattern, 4);
                REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
                          pattern_val);
                user_pattern += 4;
        }

        /* set the registers for mask */
        for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
                memcpy(&mask_val, user_mask, 4);
                REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
                user_mask += 4;
        }

        /* set the pattern length to be matched
         *
         * AR_WOW_LENGTH1
         * bit 31:24 pattern 0 length
         * bit 23:16 pattern 1 length
         * bit 15:8  pattern 2 length
         * bit 7:0   pattern 3 length
         *
         * AR_WOW_LENGTH2
         * bit 31:24 pattern 4 length
         * bit 23:16 pattern 5 length
         * bit 15:8  pattern 6 length
         * bit 7:0   pattern 7 length
         *
         * the below logic writes out the new
         * pattern length for the corresponding
         * pattern_count, while masking out the
         * other fields
         */
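        /*
         * For example, with pattern_count = 5 and pattern_len = 0x18, the
         * length lands in bits 23:16 of AR_WOW_LENGTH2 and the
         * read-modify-write below clears only that byte, leaving the lengths
         * of patterns 4, 6 and 7 untouched.
         */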

        ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);

        if (pattern_count < 4) {
                /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN1_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH1_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
        } else {
                /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN2_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH2_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
        }
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);

u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
        u32 wow_status = 0;
        u32 val = 0, rval;

        /*
         * read the WoW status register to know
         * the wakeup reason
         */
        rval = REG_READ(ah, AR_WOW_PATTERN);
        val = AR_WOW_STATUS(rval);

        /*
         * mask only the WoW events that we have enabled. Sometimes
         * we have spurious WoW events from the AR_WOW_PATTERN
         * register. This mask will clean it up.
         */
        val &= ah->wow_event_mask;

        if (val) {
                if (val & AR_WOW_MAGIC_PAT_FOUND)
                        wow_status |= AH_WOW_MAGIC_PATTERN_EN;
                if (AR_WOW_PATTERN_FOUND(val))
                        wow_status |= AH_WOW_USER_PATTERN_EN;
                if (val & AR_WOW_KEEP_ALIVE_FAIL)
                        wow_status |= AH_WOW_LINK_CHANGE;
                if (val & AR_WOW_BEACON_FAIL)
                        wow_status |= AH_WOW_BEACON_MISS;
        }

        /*
         * set and clear WOW_PME_CLEAR registers for the chip to
         * generate next wow signal.
         * disable D3 before accessing other registers ?
         */

        /* do we need to check the bit value 0x01000000 (7-10) ?? */
        REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
                AR_PMCTRL_PWR_STATE_D1D3);

        /*
         * clear all events
         */
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));

        /*
         * restore the beacon threshold to init value
         */
        REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

        /*
         * Restore the way the PCI-E reset, Power-On-Reset, external
         * PCIE_POR_SHORT pins are tied to their original values.
         * Previously, just before WoW sleep, we untied the PCI-E
         * reset from our chip's Power On Reset so that a PCI-E
         * reset from the bus would not reset our chip.
         */
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);

        ah->wow_event_mask = 0;

        return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
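
/*
 * Arm the hardware for WoW before the host suspends: untie the chip's
 * power-on-reset from the PCI-E reset, program the pattern backoff, counters
 * and timeouts, install the keep-alive frame, enable the requested wake
 * triggers and finally put the MAC into WoW sleep.
 */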
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
{
        u32 wow_event_mask;
        u32 set, clr;

        /*
         * wow_event_mask is a mask to the AR_WOW_PATTERN register to
         * indicate which WoW events we have enabled. The WoW events
         * are from the 'pattern_enable' in this function and
         * 'pattern_count' of ath9k_hw_wow_apply_pattern()
         */
        wow_event_mask = ah->wow_event_mask;

        /*
         * Untie Power-on-Reset from the PCI-E-Reset. When we are in
         * WOW sleep, we do not want a reset from the PCI-E bus to
         * disturb our hw state.
         */
        if (ah->is_pciexpress) {
                /*
                 * we need to untie the internal POR (power-on-reset)
                 * to the external PCI-E reset. We also need to tie
                 * the PCI-E Phy reset to the PCI-E reset.
                 */
                set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
                clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
                REG_RMW(ah, AR_WA, set, clr);
        }

        /*
         * set the power states appropriately and enable PME
         */
        set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
              AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;

        /*
         * set and clear WOW_PME_CLEAR registers for the chip
         * to generate next wow signal.
         */
        REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
        clr = AR_PMCTRL_WOW_PME_CLR;
        REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);

        /*
         * Setup for:
         *      - beacon misses
         *      - magic pattern
         *      - keep alive timeout
         *      - pattern matching
         */

        /*
         * Program default values for pattern backoff, aifs/slot/KAL count,
         * beacon miss timeout, KAL timeout, etc.
         */
        set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
        REG_SET_BIT(ah, AR_WOW_PATTERN, set);

        set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
              AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
              AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
        REG_SET_BIT(ah, AR_WOW_COUNT, set);

        if (pattern_enable & AH_WOW_BEACON_MISS)
                set = AR_WOW_BEACON_TIMO;
        /* We are not using beacon miss, program a large value */
        else
                set = AR_WOW_BEACON_TIMO_MAX;

        REG_WRITE(ah, AR_WOW_BCN_TIMO, set);

        /*
         * Keep alive timo in ms except AR9280
         */
        if (!pattern_enable)
                set = AR_WOW_KEEP_ALIVE_NEVER;
        else
                set = KAL_TIMEOUT * 32;

        REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
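        /*
         * Note: KAL_TIMEOUT above is in ms; the register presumably counts
         * in 1/32 ms units, hence the multiplication by 32.
         */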

        /*
         * Keep alive delay: the register is based on the 'power on clock'
         * and is therefore programmed in usec.
         */
        set = KAL_DELAY * 1000;
        REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);

        /*
         * Create keep alive pattern to respond to beacons
         */
        ath9k_wow_create_keep_alive_pattern(ah);

        /*
         * Configure MAC WoW Registers
         */
        set = 0;
        /* Send keep alive timeouts anyway */
        clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;

        if (pattern_enable & AH_WOW_LINK_CHANGE)
                wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
        else
                set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
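
        /*
         * Note: the assignment below force-disables keep-alive failure wake
         * regardless of the branch above; wow_event_mask may still carry
         * AR_WOW_KEEP_ALIVE_FAIL, but the hardware event stays disabled.
         */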
        set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
        REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);

        /*
         * we are relying on a bmiss failure. ensure we have
         * enough threshold to prevent false positives
         */
        REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
                      AR_WOW_BMISSTHRESHOLD);

        set = 0;
        clr = 0;

        if (pattern_enable & AH_WOW_BEACON_MISS) {
                set = AR_WOW_BEACON_FAIL_EN;
                wow_event_mask |= AR_WOW_BEACON_FAIL;
        } else {
                clr = AR_WOW_BEACON_FAIL_EN;
        }

        REG_RMW(ah, AR_WOW_BCN_EN, set, clr);

        set = 0;
        clr = 0;
        /*
         * Enable the magic packet registers
         */
        if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
                set = AR_WOW_MAGIC_EN;
                wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
        } else {
                clr = AR_WOW_MAGIC_EN;
        }
        set |= AR_WOW_MAC_INTR_EN;
        REG_RMW(ah, AR_WOW_PATTERN, set, clr);
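
        /*
         * Judging by the register name, the write below presumably enables
         * pattern matching for patterns shorter than 256 bytes.
         */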
        REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
                  AR_WOW_PATTERN_SUPPORTED);

        /*
         * Set the power states appropriately and enable PME
         */
        clr = 0;
        set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
              AR_PMCTRL_PWR_PM_CTRL_ENA;

        clr = AR_PCIE_PM_CTRL_ENA;
        REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);

        /*
         * this is needed to prevent the chip waking up
         * the host within 3-4 seconds with certain
         * platform/BIOS. The fix is to enable
         * D1 & D3 to match original definition and
         * also match the OTP value. Anyway this
         * is more related to SW WOW.
         */
        clr = AR_PMCTRL_PWR_STATE_D1D3;
        REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);

        set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
        REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);

        REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

        /* to bring down WOW power low margin */
        set = BIT(13);
        REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
        /* HW WoW */
        clr = BIT(5);
        REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);

        ath9k_hw_set_powermode_wow_sleep(ah);
        ah->wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);