iwlwifi: use the DMA state API instead of the pci equivalents
linux-2.6/next.git: drivers/net/wireless/iwlwifi/iwl-power.c
/******************************************************************************
 *
 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-commands.h"
#include "iwl-debug.h"
#include "iwl-power.h"

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211. In order to handle thermal throttling, we can
 * also use pre-defined power levels.
 */

/*
 * For now, keep using power level 1 instead of automatically
 * adjusting ...
 */
bool no_sleep_autoadjust = true;
module_param(no_sleep_autoadjust, bool, S_IRUGO);
MODULE_PARM_DESC(no_sleep_autoadjust,
                 "don't automatically adjust sleep level "
                 "according to maximum network latency");

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */
struct iwl_power_vec_entry {
        struct iwl_powertable_cmd cmd;
        u8 no_dtim;     /* number of skip dtim */
};

#define IWL_DTIM_RANGE_0_MAX    2
#define IWL_DTIM_RANGE_1_MAX    10

#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
                                     cpu_to_le32(X1), \
                                     cpu_to_le32(X2), \
                                     cpu_to_le32(X3), \
                                     cpu_to_le32(X4)}
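
/*
 * A note on units for the macros above: TU_TO_USEC reflects the 802.11 time
 * unit (1 TU = 1024 usec), so SLP_TOUT(T) turns a TU count into a
 * little-endian microsecond timeout, e.g. SLP_TOUT(200) = 200 * 1024 =
 * 204800 usec. SLP_VEC() just packs the five sleep interval entries used
 * by the per-level tables below.
 */
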
/* default power management (not Tx power) table values */
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
};

/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
};

/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

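/*
 * iwl_static_sleep_cmd() below turns these tables into an actual
 * POWER_TABLE_CMD: it picks range_0/range_1/range_2 from the DTIM period
 * (0-2, 3-10, above 10), copies the entry for the requested power level
 * and then clamps the sleep interval vector against the DTIM period, the
 * listen interval limit and the PCIe power management setting.
 */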
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
                                 struct iwl_powertable_cmd *cmd,
                                 enum iwl_power_level lvl, int period)
{
        const struct iwl_power_vec_entry *table;
        int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
        int i;
        u8 skip;
        u32 slp_itrvl;

        table = range_2;
        if (period <= IWL_DTIM_RANGE_1_MAX)
                table = range_1;
        if (period <= IWL_DTIM_RANGE_0_MAX)
                table = range_0;

        BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);

        *cmd = table[lvl].cmd;

        if (period == 0) {
                skip = 0;
                period = 1;

                for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                        max_sleep[i] = 1;

        } else {
                skip = table[lvl].no_dtim;
                for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                        max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
                max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
        }

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        /* figure out the listen interval based on dtim period and skip */
        if (slp_itrvl == 0xFF)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32(period * (skip + 1));

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        if (slp_itrvl > period)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32((slp_itrvl / period) * period);

        if (skip)
                cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
        else
                cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

        /* enforce max sleep interval */
        for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
                if (le32_to_cpu(cmd->sleep_interval[i]) >
                    (max_sleep[i] * period))
                        cmd->sleep_interval[i] =
                                cpu_to_le32(max_sleep[i] * period);
                if (i != (IWL_POWER_VEC_SIZE - 1)) {
                        if (le32_to_cpu(cmd->sleep_interval[i]) >
                            le32_to_cpu(cmd->sleep_interval[i+1]))
                                cmd->sleep_interval[i] =
                                        cmd->sleep_interval[i+1];
                }
        }

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;
        else
                cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
                        skip, period);
        IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}

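/*
 * Worked example for iwl_static_sleep_cmd(): with a DTIM period of 3 and
 * the first range_1 entry (sleep vector {1, 2, 3, 4, 4}, no_dtim = 0), the
 * last entry is not 0xFF and 4 > 3, so it is rounded down to
 * (4 / 3) * 3 = 3; the monotonicity pass in the final loop then lowers the
 * fourth entry from 4 to 3 as well, giving {1, 2, 3, 3, 3} with
 * IWL_POWER_SLEEP_OVER_DTIM_MSK cleared because no_dtim is 0 (assuming
 * IWL_CONN_MAX_LISTEN_INTERVAL is not exceeded).
 */
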
/* default Thermal Throttling transaction table
 * Current state | Throttling Down | Throttling Up
 *=============================================================================
 *  Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
        {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
        {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
};

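/*
 * Each tt_range_N[] row pairs a temperature window (tt_low/tt_high, in
 * Celsius) with the state to move to when the current reading falls inside
 * it. iwl_tt_initialize() copies these four tables back to back into
 * tt->transaction, and iwl_advance_tt_handler() indexes that flat array as
 * tt->transaction + (current_state * (IWL_TI_STATE_MAX - 1)) + i.
 */
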
/* Advance Thermal Throttling default restriction table */
static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
        {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
        {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
};

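/*
 * restriction_range[] is indexed by the current thermal state (IWL_TI_0
 * through IWL_TI_CT_KILL). When advanced thermal throttling is active,
 * iwl_tx_ant_restriction(), iwl_rx_ant_restriction() and iwl_ht_enabled()
 * further down simply return the tx_stream, rx_stream and is_ht limits of
 * the row matching tt->state.
 */
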
static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
                                    struct iwl_powertable_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}

static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
                                     struct iwl_powertable_cmd *cmd,
                                     int dynps_ms, int wakeup_period)
{
        /*
         * These are the original power level 3 sleep successions. The
         * device may behave better with such succession and was also
         * only tested with that. Just like the original sleep commands,
         * also adjust the succession here to the wakeup_period below.
         * The ranges are the same as for the sleep commands, 0-2, 3-9
         * and >10, which is selected based on the DTIM interval for
         * the sleep index but here we use the wakeup period since that
         * is what we need to do for the latency requirements.
         */
        static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
        static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
        static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
        const u8 *slp_succ = slp_succ_r0;
        int i;

        if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
                slp_succ = slp_succ_r1;
        if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
                slp_succ = slp_succ_r2;

        memset(cmd, 0, sizeof(*cmd));

        cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
                     IWL_POWER_FAST_PD; /* no use seeing frames for others */

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
        cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);

        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                cmd->sleep_interval[i] =
                        cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));

        IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
}

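/*
 * In the automatic command above, the rx/tx data timeouts are just
 * dynps_ms converted to microseconds (1000 * dynps_ms), and every sleep
 * vector entry is the smaller of the per-range default and the requested
 * wakeup_period, so a short wakeup period shortens each step of the
 * succession.
 */
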
static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
        IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
        IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
        IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
        IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
        IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
                        le32_to_cpu(cmd->sleep_interval[0]),
                        le32_to_cpu(cmd->sleep_interval[1]),
                        le32_to_cpu(cmd->sleep_interval[2]),
                        le32_to_cpu(cmd->sleep_interval[3]),
                        le32_to_cpu(cmd->sleep_interval[4]));

        return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
                                sizeof(struct iwl_powertable_cmd), cmd);
}

/* priv->mutex must be held */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
        int ret = 0;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
        bool update_chains;
        struct iwl_powertable_cmd cmd;
        int dtimper;

        /* Don't update the RX chain when chain noise calibration is running */
        update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
                        priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

        dtimper = priv->hw->conf.ps_dtim_period ?: 1;

        if (priv->cfg->broken_powersave)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->cfg->supports_idle &&
                 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
        else if (tt->state >= IWL_TI_1)
                iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
        else if (!enabled)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->power_data.debug_sleep_level_override >= 0)
                iwl_static_sleep_cmd(priv, &cmd,
                                     priv->power_data.debug_sleep_level_override,
                                     dtimper);
        else if (no_sleep_autoadjust)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper);
        else
                iwl_power_fill_sleep_cmd(priv, &cmd,
                                         priv->hw->conf.dynamic_ps_timeout,
                                         priv->hw->conf.max_sleep_period);

        if (iwl_is_ready_rf(priv) &&
            (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) {
                if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
                        set_bit(STATUS_POWER_PMI, &priv->status);

                ret = iwl_set_power(priv, &cmd);
                if (!ret) {
                        if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
                                clear_bit(STATUS_POWER_PMI, &priv->status);

                        if (priv->cfg->ops->lib->update_chain_flags &&
                            update_chains)
                                priv->cfg->ops->lib->update_chain_flags(priv);
                        else if (priv->cfg->ops->lib->update_chain_flags)
                                IWL_DEBUG_POWER(priv,
                                        "Cannot update the power, chain noise "
                                        "calibration running: %d\n",
                                        priv->chain_noise_data.state);
                        memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd));
                } else
                        IWL_ERR(priv, "set power fail, ret = %d", ret);
        }

        return ret;
}
EXPORT_SYMBOL(iwl_power_update_mode);

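/*
 * The if/else chain in iwl_power_update_mode() is a strict priority order:
 * hardware with broken powersave always gets the CAM (no sleep) command,
 * an idle interface gets the deepest static level, a raised thermal
 * throttling state forces its own power index, then come powersave
 * disabled (CAM again), the debug sleep level override, the fixed level 1
 * default and, only when no_sleep_autoadjust is cleared, the latency-based
 * automatic command.
 */
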
bool iwl_ht_enabled(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return true;
        restriction = tt->restriction + tt->state;
        return restriction->is_ht;
}
EXPORT_SYMBOL(iwl_ht_enabled);

bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
        s32 temp = priv->temperature; /* degrees CELSIUS except specified */
        bool within_margin = false;

        if (priv->cfg->temperature_kelvin)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD_LEGACY) ? true : false;
        else
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD) ? true : false;
        return within_margin;
}

enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->tx_stream;
}
EXPORT_SYMBOL(iwl_tx_ant_restriction);

enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->rx_stream;
}

#define CT_KILL_EXIT_DURATION (5)       /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300)  /* 300ms duration */
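
/*
 * Two timers cooperate below: ct_kill_exit_tm re-checks the temperature
 * every CT_KILL_EXIT_DURATION (5) seconds while the device sits in
 * CT_KILL, and ct_kill_waiting_tm gives the statistics request sent by
 * iwl_prepare_ct_kill_task() CT_KILL_WAITING_DURATION (300) ms to deliver
 * a fresh temperature reading before iwl_tt_ready_for_ct_kill() commits to
 * entering CT_KILL.
 */
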
/*
 * Toggle the bit to wake up uCode and check the temperature. If the
 * temperature is below CT, uCode will stay awake and send a card state
 * notification with the CT_KILL bit cleared to tell Thermal Throttling
 * Management to change state. Otherwise, uCode goes back to sleep without
 * doing anything, and the driver keeps running the 5 second timer to wake
 * up uCode for a temperature check until the temperature drops below CT.
 */
static void iwl_tt_check_exit_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        unsigned long flags;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (tt->state == IWL_TI_CT_KILL) {
                if (priv->thermal_throttle.ct_kill_toggle) {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = false;
                } else {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = true;
                }
                iwl_read32(priv, CSR_UCODE_DRV_GP1);
                spin_lock_irqsave(&priv->reg_lock, flags);
                if (!iwl_grab_nic_access(priv))
                        iwl_release_nic_access(priv);
                spin_unlock_irqrestore(&priv->reg_lock, flags);

                /* Reschedule the ct_kill timer to occur in
                 * CT_KILL_EXIT_DURATION seconds to ensure we get a
                 * thermal update */
                IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        }
}

static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
                                     bool stop)
{
        if (stop) {
                IWL_DEBUG_POWER(priv, "Stop all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_stop_queues(priv->hw);
                IWL_DEBUG_POWER(priv,
                                "Schedule 5 seconds CT_KILL Timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        } else {
                IWL_DEBUG_POWER(priv, "Wake all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_wake_queues(priv->hw);
        }
}

static void iwl_tt_ready_for_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        /* temperature timer expired, ready to go into CT_KILL state */
        if (tt->state != IWL_TI_CT_KILL) {
                IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
                tt->state = IWL_TI_CT_KILL;
                set_bit(STATUS_CT_KILL, &priv->status);
                iwl_perform_ct_kill_task(priv, true);
        }
}

static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
        IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
        /* make request to retrieve statistics information */
        iwl_send_statistics_request(priv, CMD_SYNC, false);
        /* Reschedule the ct_kill wait timer */
        mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
                  jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
}

#define IWL_MINIMAL_POWER_THRESHOLD             (CT_KILL_THRESHOLD_LEGACY)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2     (100)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1     (90)
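
/*
 * With the thresholds above, iwl_legacy_tt_handler() maps the temperature
 * (in Celsius) onto thermal states as follows:
 *      T >= CT_KILL_THRESHOLD_LEGACY           -> IWL_TI_CT_KILL
 *      100 <= T < CT_KILL_THRESHOLD_LEGACY     -> IWL_TI_2
 *      90 <= T < 100                           -> IWL_TI_1
 *      T < 90                                  -> IWL_TI_0
 */
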
/*
 * Legacy thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 */
static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        enum iwl_tt_state old_state;

#ifdef CONFIG_IWLWIFI_DEBUG
        if ((tt->tt_previous_temp) &&
            (temp > tt->tt_previous_temp) &&
            ((temp - tt->tt_previous_temp) >
            IWL_TT_INCREASE_MARGIN)) {
                IWL_DEBUG_POWER(priv,
                        "Temperature increase %d degree Celsius\n",
                        (temp - tt->tt_previous_temp));
        }
#endif
        old_state = tt->state;
        /* in Celsius */
        if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
                tt->state = IWL_TI_CT_KILL;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
                tt->state = IWL_TI_2;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
                tt->state = IWL_TI_1;
        else
                tt->state = IWL_TI_0;

#ifdef CONFIG_IWLWIFI_DEBUG
        tt->tt_previous_temp = temp;
#endif
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (tt->state != old_state) {
                switch (tt->state) {
                case IWL_TI_0:
                        /*
                         * When the system is ready to go back to IWL_TI_0
                         * we only have to call iwl_power_update_mode() to
                         * do so.
                         */
                        break;
                case IWL_TI_1:
                        tt->tt_power_mode = IWL_POWER_INDEX_3;
                        break;
                case IWL_TI_2:
                        tt->tt_power_mode = IWL_POWER_INDEX_4;
                        break;
                default:
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        break;
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                        IWL_ERR(priv, "Cannot update power mode, "
                                        "TT state not updated\n");
                } else {
                        if (tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                 tt->state != IWL_TI_CT_KILL)
                                iwl_perform_ct_kill_task(priv, false);
                        IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
                                        tt->state);
                        IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
                                        tt->tt_power_mode);
                }
                mutex_unlock(&priv->mutex);
        }
}

/*
 * Advance thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 *      Actions include relaxing the power down sleep thresholds and
 *      decreasing the number of TX streams
 * 3) Avoid throughput performance impact as much as possible
 *
 *=============================================================================
 *  Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int i;
        bool changed = false;
        enum iwl_tt_state old_state;
        struct iwl_tt_trans *transaction;

        old_state = tt->state;
        for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
                /* based on the current TT state,
                 * find the corresponding transaction table;
                 * each table has (IWL_TI_STATE_MAX - 1) entries, so
                 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
                 * advances to the correct table.
                 * Then, based on the current temperature, find the next
                 * state to transition to by going through all the possible
                 * (IWL_TI_STATE_MAX - 1) entries in the current table and
                 * checking whether a transition is needed.
                 */
                transaction = tt->transaction +
                        ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
                if (temp >= transaction->tt_low &&
                    temp <= transaction->tt_high) {
#ifdef CONFIG_IWLWIFI_DEBUG
                        if ((tt->tt_previous_temp) &&
                            (temp > tt->tt_previous_temp) &&
                            ((temp - tt->tt_previous_temp) >
                            IWL_TT_INCREASE_MARGIN)) {
                                IWL_DEBUG_POWER(priv,
                                        "Temperature increase %d "
                                        "degree Celsius\n",
                                        (temp - tt->tt_previous_temp));
                        }
                        tt->tt_previous_temp = temp;
#endif
                        if (old_state !=
                            transaction->next_state) {
                                changed = true;
                                tt->state =
                                        transaction->next_state;
                        }
                        break;
                }
        }
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (changed) {
                struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

                if (tt->state >= IWL_TI_1) {
                        /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        if (!iwl_ht_enabled(priv))
                                /* disable HT */
                                rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
                                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
                                        RXON_FLG_HT40_PROT_MSK |
                                        RXON_FLG_HT_PROT_MSK);
                        else {
                                /* check HT capability and set
                                 * according to the system HT capability
                                 * in case it got disabled earlier */
                                iwl_set_rxon_ht(priv, &priv->current_ht_config);
                        }

                } else {
                        /*
                         * restore system power setting -- it will be
                         * recalculated automatically.
                         */

                        /* check HT capability and set
                         * according to the system HT capability
                         * in case it got disabled earlier */
                        iwl_set_rxon_ht(priv, &priv->current_ht_config);
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        IWL_ERR(priv, "Cannot update power mode, "
                                        "TT state not updated\n");
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                } else {
                        IWL_DEBUG_POWER(priv,
                                        "Thermal Throttling to new state: %u\n",
                                        tt->state);
                        if (old_state != IWL_TI_CT_KILL &&
                            tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        IWL_DEBUG_POWER(priv,
                                                "Enter IWL_TI_CT_KILL\n");
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                 tt->state != IWL_TI_CT_KILL) {
                                IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
                                iwl_perform_ct_kill_task(priv, false);
                        }
                }
                mutex_unlock(&priv->mutex);
        }
}

/* Card State Notification indicates the device reached a critical
 * temperature. If PSP is not enabled, no thermal throttling is performed
 * and we just set the GP1 bit to acknowledge the event; otherwise we go
 * into the IWL_TI_CT_KILL state. Since the Card State Notification does
 * not provide a temperature reading, pass the CT_KILL temperature to
 * iwl_legacy_tt_handler() for legacy mode, and CT_KILL_THRESHOLD + 1 for
 * advanced mode to make sure we move into IWL_TI_CT_KILL.
 */
static void iwl_bg_ct_enter(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        if (tt->state != IWL_TI_CT_KILL) {
                IWL_ERR(priv, "Device reached critical temperature "
                              "- ucode going to sleep!\n");
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                              IWL_MINIMAL_POWER_THRESHOLD,
                                              true);
                else
                        iwl_advance_tt_handler(priv,
                                               CT_KILL_THRESHOLD + 1, true);
        }
}

/* Card State Notification indicates the device is back out of the critical
 * temperature range. Since the notification does not provide a temperature
 * reading, pass IWL_REDUCED_PERFORMANCE_THRESHOLD_2 to
 * iwl_legacy_tt_handler() to get out of the IWL_CT_KILL state.
 */
static void iwl_bg_ct_exit(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        /* stop ct_kill_exit_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);

        if (tt->state == IWL_TI_CT_KILL) {
                IWL_ERR(priv,
                        "Device temperature below critical"
                        "- ucode awake!\n");
                /*
                 * exit from CT_KILL state
                 * reset the current temperature reading
                 */
                priv->temperature = 0;
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                              IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
                                              true);
                else
                        iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
                                               true);
        }
}

void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
        queue_work(priv->workqueue, &priv->ct_enter);
}
EXPORT_SYMBOL(iwl_tt_enter_ct_kill);

void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
        queue_work(priv->workqueue, &priv->ct_exit);
}
EXPORT_SYMBOL(iwl_tt_exit_ct_kill);

static void iwl_bg_tt_work(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
        s32 temp = priv->temperature; /* degrees CELSIUS except specified */

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (priv->cfg->temperature_kelvin)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                iwl_legacy_tt_handler(priv, temp, false);
        else
                iwl_advance_tt_handler(priv, temp, false);
}

void iwl_tt_handler(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
        queue_work(priv->workqueue, &priv->tt_work);
}
EXPORT_SYMBOL(iwl_tt_handler);

/* Thermal throttling initialization
 * For advance thermal throttling:
 *      Initialize Thermal Index and temperature threshold table
 *      Initialize thermal throttling restriction table
 */
void iwl_tt_initialize(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
        struct iwl_tt_trans *transaction;

        IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");

        memset(tt, 0, sizeof(struct iwl_tt_mgmt));

        tt->state = IWL_TI_0;
        init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
        priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_exit_tm.function =
                iwl_tt_check_exit_ct_kill;
        init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
        priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_waiting_tm.function =
                iwl_tt_ready_for_ct_kill;
        /* setup deferred ct kill work */
        INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
        INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);

        if (priv->cfg->adv_thermal_throttle) {
                IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
                tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
                                         IWL_TI_STATE_MAX, GFP_KERNEL);
                tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
                                         IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
                                         GFP_KERNEL);
                if (!tt->restriction || !tt->transaction) {
                        IWL_ERR(priv, "Fallback to Legacy Throttling\n");
                        priv->thermal_throttle.advanced_tt = false;
                        kfree(tt->restriction);
                        tt->restriction = NULL;
                        kfree(tt->transaction);
                        tt->transaction = NULL;
                } else {
                        transaction = tt->transaction +
                                (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_0[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_1[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_2[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_3[0], size);
                        size = sizeof(struct iwl_tt_restriction) *
                                IWL_TI_STATE_MAX;
                        memcpy(tt->restriction,
                                &restriction_range[0], size);
                        priv->thermal_throttle.advanced_tt = true;
                }
        } else {
                IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
                priv->thermal_throttle.advanced_tt = false;
        }
}
EXPORT_SYMBOL(iwl_tt_initialize);

/* cleanup thermal throttling management related memory and timer */
void iwl_tt_exit(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        /* stop ct_kill_exit_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
        /* stop ct_kill_waiting_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        cancel_work_sync(&priv->tt_work);
        cancel_work_sync(&priv->ct_enter);
        cancel_work_sync(&priv->ct_exit);

        if (priv->thermal_throttle.advanced_tt) {
                /* free advance thermal throttling memory */
                kfree(tt->restriction);
                tt->restriction = NULL;
                kfree(tt->transaction);
                tt->transaction = NULL;
        }
}
EXPORT_SYMBOL(iwl_tt_exit);

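/*
 * iwl_power_initialize() below only seeds the defaults: pci_pm is taken
 * from the PCIe link control value (PCIe power management is used when L0s
 * is not enabled), and it is what later sets IWL_POWER_PCI_PM_MSK in the
 * sleep commands; debug_sleep_level_override starts at -1, which
 * iwl_power_update_mode() treats as "no override"; and the cached
 * sleep_cmd is cleared.
 */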
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
        u16 lctl = iwl_pcie_link_ctl(priv);

        priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

        priv->power_data.debug_sleep_level_override = -1;

        memset(&priv->power_data.sleep_cmd, 0,
                sizeof(priv->power_data.sleep_cmd));
}
EXPORT_SYMBOL(iwl_power_initialize);