/******************************************************************************
 *
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-commands.h"
#include "iwl-debug.h"
#include "iwl-power.h"

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211. In order to handle thermal throttling, we can
 * also use pre-defined power levels.
 */

/*
 * For now, keep using power level 1 instead of automatically
 * adjusting ...
 */
bool no_sleep_autoadjust = true;
module_param(no_sleep_autoadjust, bool, S_IRUGO);
MODULE_PARM_DESC(no_sleep_autoadjust,
                 "don't automatically adjust sleep level "
                 "according to maximum network latency");

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */

struct iwl_power_vec_entry {
        struct iwl_powertable_cmd cmd;
        u8 no_dtim;
};

#define IWL_DTIM_RANGE_0_MAX    2
#define IWL_DTIM_RANGE_1_MAX    10

#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
                                     cpu_to_le32(X1), \
                                     cpu_to_le32(X2), \
                                     cpu_to_le32(X3), \
                                     cpu_to_le32(X4)}
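/*
 * Layout of the table entries below (inferred from the macros above and
 * from iwl_static_sleep_cmd()): SLP supplies the command flags, the two
 * SLP_TOUT() values fill the rx/tx data timeouts (given in time units of
 * 1024 usec and converted to usec), and SLP_VEC() fills sleep_interval[],
 * whose values are counted in beacon/DTIM periods. A trailing 0xFF means
 * "no explicit limit"; iwl_static_sleep_cmd() clamps it against the DTIM
 * period before the command is sent.
 */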
/* default power management (not Tx power) table values */
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
};

/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
};

/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

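/*
 * Each table has IWL_POWER_NUM rows, one per power level; row 0 is the
 * shallowest sleep level (reported as "index 1" in the debug output below)
 * and the last row the most aggressive. iwl_static_sleep_cmd() selects
 * range_0/1/2 by DTIM period and then picks the row for the requested level.
 */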
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
                                 struct iwl_powertable_cmd *cmd,
                                 enum iwl_power_level lvl, int period)
{
        const struct iwl_power_vec_entry *table;
        int max_sleep, i;
        bool skip;

        table = range_2;
        if (period < IWL_DTIM_RANGE_1_MAX)
                table = range_1;
        if (period < IWL_DTIM_RANGE_0_MAX)
                table = range_0;

        BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);

        *cmd = table[lvl].cmd;

        if (period == 0) {
                skip = false;
                period = 1;
        } else {
                skip = !!table[lvl].no_dtim;
        }

        if (skip) {
                __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
                max_sleep = le32_to_cpu(slp_itrvl);
                if (max_sleep == 0xFF)
                        max_sleep = period * (skip + 1);
                else if (max_sleep > period)
                        max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
                cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
        } else {
                max_sleep = period;
                cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
        }
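
        /*
         * Illustrative example: with a DTIM period of 3 and the deepest
         * range_1 entry (no_dtim = 2, last sleep_interval = 10), skip is
         * true and 10 > 3, so max_sleep = (10 / 3) * 3 = 9, i.e. the longest
         * sleep is rounded down to a whole number of DTIM periods before the
         * vector is clamped below.
         */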

        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
                        cmd->sleep_interval[i] = cpu_to_le32(max_sleep);

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;
        else
                cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}

/* default Thermal Throttling transaction table
 * Current state   |         Throttling Down               |  Throttling Up
 *=============================================================================
 *                 Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
        {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};

static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
        {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};

static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};

static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
};

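/*
 * tt_range_0..3 are the per-state transition rows for IWL_TI_0, IWL_TI_1,
 * IWL_TI_2 and IWL_TI_CT_KILL respectively; iwl_tt_initialize() copies them
 * into tt->transaction in that order, and iwl_advance_tt_handler() walks the
 * three rows of the current state to find the one whose [tt_low, tt_high]
 * range contains the measured temperature.
 */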
/* Advanced Thermal Throttling default restriction table */
static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
        {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
        {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
};

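/*
 * One row per thermal state (IWL_TI_0 through IWL_TI_CT_KILL). The rows are
 * copied into tt->restriction by iwl_tt_initialize() and consumed by
 * iwl_tx_ant_restriction(), iwl_rx_ant_restriction() and iwl_ht_enabled()
 * below, which index them with tt->state.
 */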
static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
                                    struct iwl_powertable_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}

static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
                                     struct iwl_powertable_cmd *cmd,
                                     int dynps_ms, int wakeup_period)
{
        /*
         * These are the original power level 3 sleep successions. The
         * device may behave better with such succession and was also
         * only tested with that. Just like the original sleep commands,
         * also adjust the succession here to the wakeup_period below.
         * The ranges are the same as for the sleep commands, 0-2, 3-9
         * and >10, which is selected based on the DTIM interval for
         * the sleep index but here we use the wakeup period since that
         * is what we need to do for the latency requirements.
         */
        static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
        static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
        static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
        const u8 *slp_succ = slp_succ_r0;
        int i;

        if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
                slp_succ = slp_succ_r1;
        if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
                slp_succ = slp_succ_r2;

        memset(cmd, 0, sizeof(*cmd));

        cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
                     IWL_POWER_FAST_PD; /* no use seeing frames for others */

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
        cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);

        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                cmd->sleep_interval[i] =
                        cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));

        IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
}

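/*
 * Send the assembled power table to the device. iwl_send_cmd_pdu() issues
 * POWER_TABLE_CMD as a synchronous host command, so this blocks until the
 * uCode has processed the new settings (or the command fails).
 */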
static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
        IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
        IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
        IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
        IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
        IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
                        le32_to_cpu(cmd->sleep_interval[0]),
                        le32_to_cpu(cmd->sleep_interval[1]),
                        le32_to_cpu(cmd->sleep_interval[2]),
                        le32_to_cpu(cmd->sleep_interval[3]),
                        le32_to_cpu(cmd->sleep_interval[4]));

        return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
                                sizeof(struct iwl_powertable_cmd), cmd);
}

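/*
 * Recompute the sleep command for the current conditions and send it if it
 * differs from the last one sent (or if force is set). STATUS_POWER_PMI
 * tracks whether the active command allows the device to sleep, so that
 * lower-level register access code knows the NIC may have to be woken first.
 */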
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
        int ret = 0;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) &&
                       (priv->hw->conf.flags & IEEE80211_CONF_PS);
        bool update_chains;
        struct iwl_powertable_cmd cmd;
        int dtimper;

        /* Don't update the RX chain when chain noise calibration is running */
        update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
                        priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

        if (priv->vif)
                dtimper = priv->vif->bss_conf.dtim_period;
        else
                dtimper = 1;
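
        /*
         * Pick the sleep command, in decreasing order of precedence:
         * hardware with broken powersave always stays awake (CAM); an idle
         * device gets the deepest static level; an active thermal throttle
         * state forces its own level; powersave disabled means CAM; a
         * debugfs override, the default static level 1, or the
         * latency-based automatic command cover the remaining cases.
         */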
        if (priv->cfg->broken_powersave)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->cfg->supports_idle &&
                 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
        else if (tt->state >= IWL_TI_1)
                iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
        else if (!enabled)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->power_data.debug_sleep_level_override >= 0)
                iwl_static_sleep_cmd(priv, &cmd,
                                     priv->power_data.debug_sleep_level_override,
                                     dtimper);
        else if (no_sleep_autoadjust)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper);
        else
                iwl_power_fill_sleep_cmd(priv, &cmd,
                                         priv->hw->conf.dynamic_ps_timeout,
                                         priv->hw->conf.max_sleep_period);

        if (iwl_is_ready_rf(priv) &&
            (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) {
                if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
                        set_bit(STATUS_POWER_PMI, &priv->status);

                ret = iwl_set_power(priv, &cmd);
                if (!ret) {
                        if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
                                clear_bit(STATUS_POWER_PMI, &priv->status);

                        if (priv->cfg->ops->lib->update_chain_flags &&
                            update_chains)
                                priv->cfg->ops->lib->update_chain_flags(priv);
                        else if (priv->cfg->ops->lib->update_chain_flags)
                                IWL_DEBUG_POWER(priv,
                                        "Cannot update the power, chain noise "
                                        "calibration running: %d\n",
                                        priv->chain_noise_data.state);
                        memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd));
                } else
                        IWL_ERR(priv, "set power fail, ret = %d\n", ret);
        }

        return ret;
}
EXPORT_SYMBOL(iwl_power_update_mode);

bool iwl_ht_enabled(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return true;
        restriction = tt->restriction + tt->state;
        return restriction->is_ht;
}
EXPORT_SYMBOL(iwl_ht_enabled);

bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
        s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
        bool within_margin = false;

        if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD_LEGACY) ? true : false;
        else
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD) ? true : false;

        return within_margin;
}

enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->tx_stream;
}
EXPORT_SYMBOL(iwl_tx_ant_restriction);

enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->rx_stream;
}

#define CT_KILL_EXIT_DURATION (5)       /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300)  /* 300ms duration */

/*
 * Toggle the CT_KILL_EXIT bit to wake up the uCode so it checks the
 * temperature. If the temperature is below the CT threshold, the uCode will
 * stay awake and send a card state notification with the CT_KILL bit clear,
 * telling Thermal Throttling Management to change state. Otherwise the uCode
 * goes back to sleep without doing anything, and the driver keeps rearming
 * the 5 second timer to wake the uCode for another temperature check until
 * the temperature drops below the threshold.
 */
static void iwl_tt_check_exit_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        unsigned long flags;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (tt->state == IWL_TI_CT_KILL) {
                if (priv->thermal_throttle.ct_kill_toggle) {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = false;
                } else {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = true;
                }
                iwl_read32(priv, CSR_UCODE_DRV_GP1);
                spin_lock_irqsave(&priv->reg_lock, flags);
                if (!iwl_grab_nic_access(priv))
                        iwl_release_nic_access(priv);
                spin_unlock_irqrestore(&priv->reg_lock, flags);

                /* Reschedule the ct_kill timer to occur in
                 * CT_KILL_EXIT_DURATION seconds to ensure we get a
                 * thermal update */
                IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        }
}

static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
                                     bool stop)
{
        if (stop) {
                IWL_DEBUG_POWER(priv, "Stop all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_stop_queues(priv->hw);
                IWL_DEBUG_POWER(priv,
                                "Schedule 5 seconds CT_KILL Timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        } else {
                IWL_DEBUG_POWER(priv, "Wake all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_wake_queues(priv->hw);
        }
}

static void iwl_tt_ready_for_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        /* temperature timer expired, ready to go into CT_KILL state */
        if (tt->state != IWL_TI_CT_KILL) {
                IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
                tt->state = IWL_TI_CT_KILL;
                set_bit(STATUS_CT_KILL, &priv->status);
                iwl_perform_ct_kill_task(priv, true);
        }
}

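/*
 * Prepare (rather than immediately enter) the CT_KILL state: request a fresh
 * statistics notification, which carries a temperature reading, and arm the
 * 300 ms ct_kill_waiting_tm timer. If no state change happens before that
 * timer fires, iwl_tt_ready_for_ct_kill() above forces the CT_KILL entry.
 */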
static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
        IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
        /* make request to retrieve statistics information */
        iwl_send_statistics_request(priv, 0);
        /* Reschedule the ct_kill wait timer */
        mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
                  jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
}

#define IWL_MINIMAL_POWER_THRESHOLD            (CT_KILL_THRESHOLD_LEGACY)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2    (100)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1    (90)

/*
 * Legacy thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 */
static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        enum iwl_tt_state old_state;

#ifdef CONFIG_IWLWIFI_DEBUG
        if ((tt->tt_previous_temp) &&
            (temp > tt->tt_previous_temp) &&
            ((temp - tt->tt_previous_temp) >
            IWL_TT_INCREASE_MARGIN)) {
                IWL_DEBUG_POWER(priv,
                        "Temperature increase %d degree Celsius\n",
                        (temp - tt->tt_previous_temp));
        }
#endif
        old_state = tt->state;
        /* in Celsius */
        if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
                tt->state = IWL_TI_CT_KILL;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
                tt->state = IWL_TI_2;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
                tt->state = IWL_TI_1;
        else
                tt->state = IWL_TI_0;

#ifdef CONFIG_IWLWIFI_DEBUG
        tt->tt_previous_temp = temp;
#endif
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (tt->state != old_state) {
                switch (tt->state) {
                case IWL_TI_0:
                        /*
                         * When the system is ready to go back to IWL_TI_0
                         * we only have to call iwl_power_update_mode() to
                         * do so.
                         */
                        break;
                case IWL_TI_1:
                        tt->tt_power_mode = IWL_POWER_INDEX_3;
                        break;
                case IWL_TI_2:
                        tt->tt_power_mode = IWL_POWER_INDEX_4;
                        break;
                default:
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        break;
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                        IWL_ERR(priv, "Cannot update power mode, "
                                      "TT state not updated\n");
                } else {
                        if (tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                   tt->state != IWL_TI_CT_KILL)
                                iwl_perform_ct_kill_task(priv, false);
                        IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
                                        tt->state);
                        IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
                                        tt->tt_power_mode);
                }
                mutex_unlock(&priv->mutex);
        }
}

/*
 * Advanced thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 *      Actions include relaxing the power down sleep thresholds and
 *      decreasing the number of TX streams
 * 3) Avoid throughput performance impact as much as possible
 *
 * Current state   |         Throttling Down               |  Throttling Up
 *=============================================================================
 *                 Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
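/*
 * tt->transaction is laid out as IWL_TI_STATE_MAX groups of
 * (IWL_TI_STATE_MAX - 1) iwl_tt_trans entries, one group per current state,
 * so the group for old_state starts at old_state * (IWL_TI_STATE_MAX - 1).
 * The handler below scans that group for the entry whose temperature range
 * contains the new reading and transitions to its next_state.
 */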
static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int i;
        bool changed = false;
        enum iwl_tt_state old_state;
        struct iwl_tt_trans *transaction;

        old_state = tt->state;
        for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
                /* based on the current TT state,
                 * find the corresponding transaction table;
                 * each table has (IWL_TI_STATE_MAX - 1) entries, so
                 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
                 * advances to the correct table.
                 * Then, based on the current temperature, go through all
                 * (IWL_TI_STATE_MAX - 1) entries in that table to find the
                 * next state to transition to, if any.
                 */
                transaction = tt->transaction +
                        ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
                if (temp >= transaction->tt_low &&
                    temp <= transaction->tt_high) {
#ifdef CONFIG_IWLWIFI_DEBUG
                        if ((tt->tt_previous_temp) &&
                            (temp > tt->tt_previous_temp) &&
                            ((temp - tt->tt_previous_temp) >
                            IWL_TT_INCREASE_MARGIN)) {
                                IWL_DEBUG_POWER(priv,
                                        "Temperature increase %d "
                                        "degree Celsius\n",
                                        (temp - tt->tt_previous_temp));
                        }
                        tt->tt_previous_temp = temp;
#endif
                        if (old_state !=
                            transaction->next_state) {
                                changed = true;
                                tt->state =
                                        transaction->next_state;
                        }
                        break;
                }
        }
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (changed) {
                struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

                if (tt->state >= IWL_TI_1) {
                        /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        if (!iwl_ht_enabled(priv))
                                /* disable HT */
                                rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
                                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
                                        RXON_FLG_HT40_PROT_MSK |
                                        RXON_FLG_HT_PROT_MSK);
                        else {
                                /* check HT capability and set it according
                                 * to the system HT capability, in case it
                                 * was disabled before */
                                iwl_set_rxon_ht(priv, &priv->current_ht_config);
                        }
                } else {
                        /*
                         * restore system power setting -- it will be
                         * recalculated automatically.
                         */

                        /* check HT capability and set it according
                         * to the system HT capability, in case it
                         * was disabled before */
                        iwl_set_rxon_ht(priv, &priv->current_ht_config);
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        IWL_ERR(priv, "Cannot update power mode, "
                                      "TT state not updated\n");
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                } else {
                        IWL_DEBUG_POWER(priv,
                                        "Thermal Throttling to new state: %u\n",
                                        tt->state);
                        if (old_state != IWL_TI_CT_KILL &&
                            tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        IWL_DEBUG_POWER(priv,
                                                        "Enter IWL_TI_CT_KILL\n");
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                   tt->state != IWL_TI_CT_KILL) {
                                IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
                                iwl_perform_ct_kill_task(priv, false);
                        }
                }
                mutex_unlock(&priv->mutex);
        }
}

/*
 * Card State Notification indicated that the device reached a critical
 * temperature. If power saving (PSP) is not enabled, no thermal throttling
 * is performed and the GP1 bit is simply set to acknowledge the event;
 * otherwise we go into the IWL_TI_CT_KILL state. Since the Card State
 * Notification does not provide a temperature reading, pass the legacy
 * CT_KILL temperature to iwl_legacy_tt_handler(), and for advanced mode
 * pass CT_KILL_THRESHOLD + 1 to make sure we move into IWL_TI_CT_KILL.
 */
static void iwl_bg_ct_enter(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        if (tt->state != IWL_TI_CT_KILL) {
                IWL_ERR(priv, "Device reached critical temperature "
                              "- ucode going to sleep!\n");
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                              IWL_MINIMAL_POWER_THRESHOLD,
                                              true);
                else
                        iwl_advance_tt_handler(priv,
                                               CT_KILL_THRESHOLD + 1, true);
        }
}

/*
 * Card State Notification indicated that the device is back out of the
 * critical temperature range. The notification does not provide a
 * temperature reading, so pass IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (legacy)
 * or CT_KILL_EXIT_THRESHOLD (advanced) to the handler to get out of the
 * IWL_TI_CT_KILL state.
 */
static void iwl_bg_ct_exit(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        /* stop ct_kill_exit_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);

        if (tt->state == IWL_TI_CT_KILL) {
                IWL_ERR(priv,
                        "Device temperature below critical "
                        "- ucode awake!\n");
                /*
                 * exit from CT_KILL state
                 * reset the current temperature reading
                 */
                priv->temperature = 0;
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                              IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
                                              true);
                else
                        iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
                                               true);
        }
}

void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
        queue_work(priv->workqueue, &priv->ct_enter);
}
EXPORT_SYMBOL(iwl_tt_enter_ct_kill);

void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
        queue_work(priv->workqueue, &priv->ct_exit);
}
EXPORT_SYMBOL(iwl_tt_exit_ct_kill);

static void iwl_bg_tt_work(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
        s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                iwl_legacy_tt_handler(priv, temp, false);
        else
                iwl_advance_tt_handler(priv, temp, false);
}

void iwl_tt_handler(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
        queue_work(priv->workqueue, &priv->tt_work);
}
EXPORT_SYMBOL(iwl_tt_handler);

/* Thermal throttling initialization
 * For advanced thermal throttling:
 *      Initialize the Thermal Index and temperature threshold table
 *      Initialize the thermal throttling restriction table
 */
void iwl_tt_initialize(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
        struct iwl_tt_trans *transaction;

        IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");

        memset(tt, 0, sizeof(struct iwl_tt_mgmt));

        tt->state = IWL_TI_0;
        init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
        priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_exit_tm.function =
                iwl_tt_check_exit_ct_kill;
        init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
        priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_waiting_tm.function =
                iwl_tt_ready_for_ct_kill;
        /* setup deferred ct kill work */
        INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
        INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);

        switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
        case CSR_HW_REV_TYPE_6x00:
        case CSR_HW_REV_TYPE_6x50:
                IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
                tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
                                          IWL_TI_STATE_MAX, GFP_KERNEL);
                tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
                                          IWL_TI_STATE_MAX *
                                          (IWL_TI_STATE_MAX - 1),
                                          GFP_KERNEL);
                if (!tt->restriction || !tt->transaction) {
                        IWL_ERR(priv, "Fallback to Legacy Throttling\n");
                        priv->thermal_throttle.advanced_tt = false;
                        kfree(tt->restriction);
                        tt->restriction = NULL;
                        kfree(tt->transaction);
                        tt->transaction = NULL;
                } else {
                        transaction = tt->transaction +
                                (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_0[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_1[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_2[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_3[0], size);
                        size = sizeof(struct iwl_tt_restriction) *
                                IWL_TI_STATE_MAX;
                        memcpy(tt->restriction,
                               &restriction_range[0], size);
                        priv->thermal_throttle.advanced_tt = true;
                }
                break;
        default:
                IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
                priv->thermal_throttle.advanced_tt = false;
                break;
        }
}
EXPORT_SYMBOL(iwl_tt_initialize);

/* cleanup thermal throttling management related memory and timers */
void iwl_tt_exit(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        /* stop ct_kill_exit_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
        /* stop ct_kill_waiting_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        cancel_work_sync(&priv->tt_work);
        cancel_work_sync(&priv->ct_enter);
        cancel_work_sync(&priv->ct_exit);

        if (priv->thermal_throttle.advanced_tt) {
                /* free advanced thermal throttling memory */
                kfree(tt->restriction);
                tt->restriction = NULL;
                kfree(tt->transaction);
                tt->transaction = NULL;
        }
}
EXPORT_SYMBOL(iwl_tt_exit);

/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
        u16 lctl = iwl_pcie_link_ctl(priv);
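
        /*
         * Only allow the uCode to use PCIe power management when L0s is
         * disabled in the PCIe link control register; iwl_static_sleep_cmd()
         * and the other command builders translate pci_pm into
         * IWL_POWER_PCI_PM_MSK.
         */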
        priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

        priv->power_data.debug_sleep_level_override = -1;

        memset(&priv->power_data.sleep_cmd, 0,
               sizeof(priv->power_data.sleep_cmd));
}
EXPORT_SYMBOL(iwl_power_initialize);