/* drivers/idle/i7300_idle.c */
/*
 * (C) Copyright 2008 Intel Corporation
 * Authors:
 * Andy Henroid <andrew.d.henroid@intel.com>
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */
/*
 * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
 * are idle, using the DIMM thermal throttling capability.
 *
 * This driver depends on the Intel integrated DMA controller (I/O AT).
 * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
 * this driver should work cooperatively.
 */

/* #define DEBUG */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/stop_machine.h>
#include <linux/i7300_idle.h>

#include <asm/idle.h>

#include "../dma/ioatdma_hw.h"
#include "../dma/ioatdma_registers.h"
#define I7300_IDLE_DRIVER_VERSION	"1.55"
#define I7300_PRINT			"i7300_idle:"

#define MAX_STOP_RETRIES	10
static int debug;
module_param_named(debug, debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
static int forceload;
module_param_named(forceload, forceload, uint, 0644);
MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
#define dprintk(fmt, arg...) \
	do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
/*
 * Value to set THRTLOW to when initiating throttling
 *  0 = No throttling
 *  1 = Throttle when > 4 activations per eval window (Maximum throttling)
 *  2 = Throttle when > 8 activations
 * 168 = Throttle when > 672 activations (Minimum throttling)
 */
#define MAX_THROTTLE_LOW_LIMIT	168
static uint throttle_low_limit = 1;
module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
MODULE_PARM_DESC(throttle_low_limit,
		"Value for THRTLOWLM activation field "
		"(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
/*
 * simple invocation and duration statistics
 */
static unsigned long total_starts;
static unsigned long total_us;

#ifdef DEBUG
static unsigned long past_skip;
#endif
static struct pci_dev *fbd_dev;

static spinlock_t i7300_idle_lock;
static int i7300_idle_active;

static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;

static cpumask_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;

static struct dentry *debugfs_dir;
/* Begin: I/O AT Helper routines */

#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
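/*
 * i.e. one 0x80-byte register block per channel: channel 0's registers
 * start at ioat_ctl + 0x80, channel 1's at ioat_ctl + 0x100, and so on.
 */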
/* Snoop control (disable snoops when coherency is not important) */
#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)

static struct pci_dev *ioat_dev;
static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
static unsigned long ioat_desc_phys;
static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
static u8 *ioat_chanbase;
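
/*
 * Layout of the single page behind ioat_desc, as used by the selftest and
 * the chain setup below: the descriptors live at the start of the page,
 * the 0xab source pattern is placed at offset 2048, and copies land at
 * offset 1024 (selftest) or offset 3072 (steady-state chain).
 */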
/* Start I/O AT memory copy */
static int i7300_idle_ioat_start(void)
{
	u32 err;
	/* Clear error (due to circular descriptor pointer) */
	err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
	if (err)
		writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);

	writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
	return 0;
}
/* Stop I/O AT memory copy */
static void i7300_idle_ioat_stop(void)
{
	int i;
	u64 sts;

	for (i = 0; i < MAX_STOP_RETRIES; i++) {
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);

		udelay(10);

		sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

		if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
			break;
	}

	if (i == MAX_STOP_RETRIES) {
		dprintk("failed to stop I/O AT after %d retries\n",
			MAX_STOP_RETRIES);
	}
}
/* Test I/O AT by copying 1024 bytes from 2k to 1k */
static int __init i7300_idle_ioat_selftest(u8 *ctl,
		struct ioat_dma_descriptor *desc, unsigned long desc_phys)
{
	u64 chan_sts;

	memset(desc, 0, 2048);
	memset((u8 *) desc + 2048, 0xab, 1024);

	desc[0].size = 1024;
	desc[0].ctl = 0;
	desc[0].src_addr = desc_phys + 2048;
	desc[0].dst_addr = desc_phys + 1024;
	desc[0].next = 0;

	writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
	writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);

	udelay(1000);

	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

	if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
		/* Not complete, reset the channel */
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);
		return -1;
	}

	if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
	    *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
		dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
			*(u32 *) ((u8 *) desc + 2048),
			*(u32 *) ((u8 *) desc + 1024),
			*(u32 *) ((u8 *) desc + 3072));
		return -1;
	}
	return 0;
}
static struct device dummy_dma_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_BIT_MASK(64),
	.dma_mask = &dummy_dma_dev.coherent_dma_mask,
};
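
/*
 * Why a standalone device: dma_alloc_coherent() needs a struct device to
 * take a DMA mask from, and this driver is not bound to one. The dummy
 * device above appears to exist only to advertise a 64-bit coherent mask
 * for the descriptor-page allocation in i7300_idle_ioat_init() below.
 */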
/* Setup and initialize I/O AT */
/* This driver needs I/O AT as the throttling takes effect only when there is
 * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
 * go idle and memory is throttled.
 */
static int __init i7300_idle_ioat_init(void)
{
	u8 ver, chan_count, ioat_chan;
	u16 chan_ctl;

	ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
					    pci_resource_len(ioat_dev, 0));

	if (!ioat_iomap) {
		printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
		goto err_ret;
	}

	ver = readb(ioat_iomap + IOAT_VER_OFFSET);
	if (ver != IOAT_VER_1_2) {
		printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
			ver >> 4, ver & 0xf);
		goto err_unmap;
	}

	chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
	if (!chan_count) {
		printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
			"(%u)\n",
			chan_count);
		goto err_unmap;
	}

	ioat_chan = chan_count - 1;
	ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);

	chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
	if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
		printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
		goto err_unmap;
	}

	writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
		ioat_chanbase + IOAT_CHANCTRL_OFFSET);

	ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
			&dummy_dma_dev, 4096,
			(dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
	if (!ioat_desc) {
		printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
		goto err_mark_unused;
	}

	writel(ioat_desc_phys & 0xffffffffUL,
	       ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(ioat_desc_phys >> 32,
	       ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);

	if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
		printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
		goto err_free;
	}

	/* Setup circular I/O AT descriptor chain */
	ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
	ioat_desc[0].src_addr = ioat_desc_phys + 2048;
	ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
	ioat_desc[0].size = 128;
	ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);

	ioat_desc[1].ctl = ioat_desc[0].ctl;
	ioat_desc[1].src_addr = ioat_desc[0].src_addr;
	ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
	ioat_desc[1].size = ioat_desc[0].size;
	ioat_desc[1].next = ioat_desc_phys;
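
	/*
	 * desc[0].next points at desc[1] and desc[1].next points back at
	 * desc[0], so once started the channel keeps replaying the same
	 * 128-byte copy (offset 2048 -> 3072 within the page). That endless
	 * copy is the "memory activity" that lets DIMM throttling engage.
	 */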
	return 0;
err_free:
	dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
err_mark_unused:
	writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
err_unmap:
	iounmap(ioat_iomap);
err_ret:
	return -ENODEV;
}
/* Cleanup I/O AT */
static void __exit i7300_idle_ioat_exit(void)
{
	int i;
	u64 chan_sts;

	i7300_idle_ioat_stop();

	/* Wait for a while for the channel to halt before releasing */
	for (i = 0; i < MAX_STOP_RETRIES; i++) {
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);

		chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

		if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
			writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
			break;
		}
		udelay(1000);
	}

	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

	/*
	 * We tried to reset multiple times. If the I/O AT channel is still
	 * active, flag an error and return without cleanup. A memory leak is
	 * better than random corruption in that extreme error situation.
	 */
	if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
		printk(KERN_ERR I7300_PRINT "Unable to stop I/O AT channels."
			" Not freeing resources\n");
		return;
	}

	dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
	iounmap(ioat_iomap);
}
/* End: I/O AT Helper routines */

#define DIMM_THRTLOW 0x64
#define DIMM_THRTCTL 0x67
#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
#define DIMM_MC 0x40
#define DIMM_GTW_MODE (1UL << 17)
#define DIMM_GBLACT 0x60
/*
 * Keep track of an exponentially-decaying average of recent idle durations.
 * The latest duration gets DURATION_WEIGHT_PCT percent of the weight
 * in this average, with the old average getting the remaining weight.
 *
 * High weights emphasize recent history, low weights include long history.
 */
#define DURATION_WEIGHT_PCT 55
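
/*
 * Worked example with the 55% weight: if avg_idle_us is 200 and the latest
 * idle stretch lasted 100 us, the new average is
 * (45 * 200 + 55 * 100) / 100 = 145 us (see the update in
 * i7300_idle_notifier() below).
 */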
/*
 * When the decaying average of recent durations or the predicted duration
 * of the next timer interrupt is shorter than duration_threshold, the
 * driver will decline to throttle.
 */
#define DURATION_THRESHOLD_US 100
/* Store DIMM thermal throttle configuration */
static int i7300_idle_thrt_save(void)
{
	u32 new_mc_val;
	u8 gblactlm;

	pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
	pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
	pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);

	/*
	 * Make sure we have Global Throttling Window Mode set to have a
	 * "short" window. This (mostly) works around an issue where
	 * throttling persists until the end of the global throttling window
	 * size. On the tested system, this was resulting in a maximum of
	 * 64 ms to exit throttling (average 32 ms). The actual numbers
	 * depend on system frequencies. Setting the short window reduces
	 * this by a factor of 4096.
	 *
	 * We will only do this if the system is set for
	 * unlimited-activations while in open-loop throttling (i.e., when
	 * Global Activation Throttle Limit is zero).
	 */
	pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
	dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
		i7300_idle_thrtctl_saved,
		i7300_idle_thrtlow_saved);
	dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
		i7300_idle_mc_saved,
		gblactlm);
	if (gblactlm == 0) {
		new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
		pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
		return 0;
	} else {
		dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
		return -ENODEV;
	}
}
/* Restore DIMM thermal throttle configuration */
static void i7300_idle_thrt_restore(void)
{
	pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
}
/* Enable DIMM thermal throttling */
static void i7300_idle_start(void)
{
	u8 new_ctl;
	u8 limit;

	new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

	limit = throttle_low_limit;
	if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
		limit = MAX_THROTTLE_LOW_LIMIT;

	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);

	new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
}
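
/*
 * Note the write ordering above: THRMHUNT is cleared first, the THRTLOW
 * limit is programmed, and only then is THRMHUNT set again; presumably the
 * hunt bit must be off while the low limit changes. The stop path below
 * mirrors this disable-then-restore sequence.
 */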
/* Disable DIMM thermal throttling */
static void i7300_idle_stop(void)
{
	u8 new_ctl;
	u8 got_ctl;

	new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
	pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
	WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
}
/*
 * i7300_avg_duration_check()
 * return 0 if the decaying average of recent idle durations is
 * more than DURATION_THRESHOLD_US
 */
static int i7300_avg_duration_check(void)
{
	if (avg_idle_us >= DURATION_THRESHOLD_US)
		return 0;

#ifdef DEBUG
	past_skip++;
#endif
	return 1;
}
/* Idle notifier to look at idle CPUs */
static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long flags;
	ktime_t now_ktime;
	static ktime_t idle_begin_time;
	static int time_init = 1;

	if (!throttle_low_limit)
		return 0;

	if (unlikely(time_init)) {
		time_init = 0;
		idle_begin_time = ktime_get();
	}

	spin_lock_irqsave(&i7300_idle_lock, flags);
	if (val == IDLE_START) {

		cpu_set(smp_processor_id(), idle_cpumask);

		if (cpus_weight(idle_cpumask) != num_online_cpus())
			goto end;

		now_ktime = ktime_get();
		idle_begin_time = now_ktime;

		if (i7300_avg_duration_check())
			goto end;

		i7300_idle_active = 1;
		total_starts++;
		start_ktime = now_ktime;

		i7300_idle_start();
		i7300_idle_ioat_start();

	} else if (val == IDLE_END) {
		cpu_clear(smp_processor_id(), idle_cpumask);
		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
			/* First CPU coming out of idle */
			u64 idle_duration_us;

			now_ktime = ktime_get();

			idle_duration_us = ktime_to_us(ktime_sub
						(now_ktime, idle_begin_time));

			avg_idle_us =
				((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
				 DURATION_WEIGHT_PCT * idle_duration_us) / 100;

			if (i7300_idle_active) {
				ktime_t idle_ktime;

				idle_ktime = ktime_sub(now_ktime, start_ktime);
				total_us += ktime_to_us(idle_ktime);

				i7300_idle_ioat_stop();
				i7300_idle_stop();
				i7300_idle_active = 0;
			}
		}
	}
end:
	spin_unlock_irqrestore(&i7300_idle_lock, flags);
	return 0;
}
static struct notifier_block i7300_idle_nb = {
	.notifier_call = i7300_idle_notifier,
};

MODULE_DEVICE_TABLE(pci, pci_tbl);
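
/*
 * pci_tbl is not defined in this file; as far as I can tell it comes from
 * <linux/i7300_idle.h> (included above), which also provides
 * i7300_idle_platform_probe() used in i7300_idle_init() below.
 */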
static int stats_open_generic(struct inode *inode, struct file *fp)
{
	fp->private_data = inode->i_private;
	return 0;
}
static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
				loff_t *off)
{
	unsigned long *p = fp->private_data;
	char buf[32];
	int len;

	len = snprintf(buf, 32, "%lu\n", *p);
	return simple_read_from_buffer(ubuf, count, off, buf, len);
}
static const struct file_operations idle_fops = {
	.open	= stats_open_generic,
	.read	= stats_read_ul,
};
struct debugfs_file_info {
	void *ptr;
	char name[32];
	struct dentry *file;
} debugfs_file_list[] = {
	{&total_starts, "total_starts", NULL},
	{&total_us, "total_us", NULL},
#ifdef DEBUG
	{&past_skip, "past_skip", NULL},
#endif
	{NULL, "", NULL}
};
static int __init i7300_idle_init(void)
{
	spin_lock_init(&i7300_idle_lock);
	cpus_clear(idle_cpumask);
	total_us = 0;

	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
		return -ENODEV;

	if (i7300_idle_thrt_save())
		return -ENODEV;

	if (i7300_idle_ioat_init())
		return -ENODEV;

	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
	if (debugfs_dir) {
		int i = 0;

		while (debugfs_file_list[i].ptr != NULL) {
			debugfs_file_list[i].file = debugfs_create_file(
					debugfs_file_list[i].name,
					S_IRUSR,
					debugfs_dir,
					debugfs_file_list[i].ptr,
					&idle_fops);
			i++;
		}
	}

	idle_notifier_register(&i7300_idle_nb);

	printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
	return 0;
}
static void __exit i7300_idle_exit(void)
{
	idle_notifier_unregister(&i7300_idle_nb);

	if (debugfs_dir) {
		int i = 0;

		while (debugfs_file_list[i].file != NULL) {
			debugfs_remove(debugfs_file_list[i].file);
			i++;
		}

		debugfs_remove(debugfs_dir);
	}

	i7300_idle_thrt_restore();
	i7300_idle_ioat_exit();
}
module_init(i7300_idle_init);
module_exit(i7300_idle_exit);

MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
			I7300_IDLE_DRIVER_VERSION);
MODULE_LICENSE("GPL");