// SPDX-License-Identifier: GPL-2.0-only
/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/cpu_device_id.h>
#include <linux/sched/isolation.h>

#define DRVNAME "coretemp"

/*
 * force_tjmax only matters when TjMax can't be read from the CPU itself.
 * When set, it replaces the driver's suboptimal heuristic.
 */
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

#define NUM_REAL_CORES          512     /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH    28      /* String Length of attrs */

enum coretemp_attr_index {
        ATTR_LABEL,
        ATTR_CRIT_ALARM,
        ATTR_TEMP,
        ATTR_TJMAX,
        ATTR_TTARGET,
        MAX_CORE_ATTRS = ATTR_TJMAX + 1,        /* Maximum no of basic attrs */
        TOTAL_ATTRS = ATTR_TTARGET + 1          /* Maximum no of possible attrs */
};

#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
        for_each_cpu(i, topology_sibling_cpumask(cpu))
#else
#define for_each_sibling(i, cpu)        for (i = 0; false; )
#endif

/*
 * Per-Core Temperature Data
 * @tjmax: The static tjmax value when tjmax cannot be retrieved from
 *              IA32_TEMPERATURE_TARGET MSR.
 * @last_updated: The time when the current temperature value was updated
 *              earlier (in jiffies).
 * @cpu_core_id: The CPU Core from which temperature values should be read
 *              This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *              from where the temperature values should be read.
 * @attr_size: Total number of per-core attrs displayed in the sysfs.
 */
struct temp_data {
        int temp;
        int tjmax;
        unsigned long last_updated;
        unsigned int cpu;
        int index;
        u32 cpu_core_id;
        u32 status_reg;
        int attr_size;
        struct device_attribute sd_attrs[TOTAL_ATTRS];
        char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
        struct attribute *attrs[TOTAL_ATTRS + 1];
        struct attribute_group attr_group;
        struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
        struct device           *hwmon_dev;
        u16                     pkg_id;
        int                     nr_cores;
        struct ida              ida;
        struct cpumask          cpumask;
        struct temp_data        *pkg_data;
        struct temp_data        **core_data;
        struct device_attribute name_attr;
};

struct tjmax_pci {
        unsigned int device;
        int tjmax;
};

static const struct tjmax_pci tjmax_pci_table[] = {
        { 0x0708, 110000 },     /* CE41x0 (Sodaville ) */
        { 0x0c72, 102000 },     /* Atom S1240 (Centerton) */
        { 0x0c73, 95000 },      /* Atom S1220 (Centerton) */
        { 0x0c75, 95000 },      /* Atom S1260 (Centerton) */
};

struct tjmax {
        char const *id;
        int tjmax;
};

static const struct tjmax tjmax_table[] = {
        { "CPU 230", 100000 },          /* Model 0x1c, stepping 2 */
        { "CPU 330", 125000 },          /* Model 0x1c, stepping 2 */
};

struct tjmax_model {
        u8 model;
        u8 mask;
        int tjmax;
};

#define ANY 0xff

static const struct tjmax_model tjmax_model_table[] = {
        { 0x1c, 10, 100000 },   /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
        { 0x1c, ANY, 90000 },   /* Z5xx, N2xx, possibly others
                                 * Note: Also matches 230 and 330,
                                 * which are covered by tjmax_table
                                 */
        { 0x26, ANY, 90000 },   /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
                                 * Note: TjMax for E6xxT is 110C, but CPU type
                                 * is undetectable by software
                                 */
        { 0x27, ANY, 90000 },   /* Atom Medfield (Z2460) */
        { 0x35, ANY, 90000 },   /* Atom Clover Trail/Cloverview (Z27x0) */
        { 0x36, ANY, 100000 },  /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
                                 * Also matches S12x0 (stepping 9), covered by
                                 * tjmax_pci_table above
                                 */
};

static bool is_pkg_temp_data(struct temp_data *tdata)
{
        return tdata->index < 0;
}
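
/*
 * adjust_tjmax() estimates TjMax for CPUs that do not report it in
 * IA32_TEMPERATURE_TARGET: explicit table entries (PCI host bridge IDs,
 * model ID strings, model/stepping) are tried first, then a mobile/desktop
 * heuristic based on MSRs 0x17 and 0xEE. The result is in millidegrees
 * Celsius and may not match the real TjMax.
 */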

static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        /* The 100C is default for both mobile and non mobile CPUs */

        int tjmax = 100000;
        int tjmax_ee = 85000;
        int usemsr_ee = 1;
        int err;
        u32 eax, edx;
        int i;
        u16 devfn = PCI_DEVFN(0, 0);
        struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);

        /*
         * Explicit tjmax table entries override heuristics.
         * First try PCI host bridge IDs, followed by model ID strings
         * and model/stepping information.
         */
        if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
                for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
                        if (host_bridge->device == tjmax_pci_table[i].device) {
                                pci_dev_put(host_bridge);
                                return tjmax_pci_table[i].tjmax;
                        }
                }
        }
        pci_dev_put(host_bridge);

        for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
                if (strstr(c->x86_model_id, tjmax_table[i].id))
                        return tjmax_table[i].tjmax;
        }

        for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                const struct tjmax_model *tm = &tjmax_model_table[i];

                if (c->x86_model == tm->model &&
                    (tm->mask == ANY || c->x86_stepping == tm->mask))
                        return tm->tjmax;
        }

        /* Early chips have no MSR for TjMax */

        if (c->x86_model == 0xf && c->x86_stepping < 4)
                usemsr_ee = 0;

        if (c->x86_model > 0xe && usemsr_ee) {
                u8 platform_id;

                /*
                 * Now we can detect the mobile CPU using Intel provided table
                 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
                 * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
                 */
                err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0x17, assuming desktop"
                                 " CPU\n");
                        usemsr_ee = 0;
                } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
                        /*
                         * Trust bit 28 up to Penryn, I could not find any
                         * documentation on that; if you happen to know
                         * someone at Intel please ask
                         */
                        usemsr_ee = 0;
                } else {
                        /* Platform ID bits 52:50 (EDX starts at bit 32) */
                        platform_id = (edx >> 18) & 0x7;

                        /*
                         * Mobile Penryn CPU seems to be platform ID 7 or 5
                         * (guessed)
                         */
                        if (c->x86_model == 0x17 &&
                            (platform_id == 5 || platform_id == 7)) {
                                /*
                                 * If MSR EE bit is set, set it to 90 degrees C,
                                 * otherwise 105 degrees C
                                 */
                                tjmax_ee = 90000;
                                tjmax = 105000;
                        }
                }
        }

        if (usemsr_ee) {
                err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0xEE, for Tjmax, left"
                                 " at default\n");
                } else if (eax & 0x40000000) {
                        tjmax = tjmax_ee;
                }
        } else if (tjmax == 100000) {
                /*
                 * If we don't use msr EE it means we are desktop CPU
                 * (with exception of Atom)
                 */
                dev_warn(dev, "Using relative temperature scale!\n");
        }

        return tjmax;
}

static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
{
        u8 model = c->x86_model;

        return model > 0xe &&
               model != 0x1c &&
               model != 0x26 &&
               model != 0x27 &&
               model != 0x35 &&
               model != 0x36;
}

static int get_tjmax(struct temp_data *tdata, struct device *dev)
{
        struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
        int err;
        u32 eax, edx;
        u32 val;

        /* use static tjmax once it is set */
        if (tdata->tjmax)
                return tdata->tjmax;

        /*
         * A new feature of current Intel(R) processors, the
         * IA32_TEMPERATURE_TARGET contains the TjMax value
         */
        err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
        } else {
                val = (eax >> 16) & 0xff;
                if (val)
                        return val * 1000;
        }

        if (force_tjmax) {
                dev_notice(dev, "TjMax forced to %d degrees C by user\n",
                           force_tjmax);
                tdata->tjmax = force_tjmax * 1000;
        } else {
                /*
                 * An assumption is made for early CPUs and unreadable MSR.
                 * NOTE: the calculated value may not be correct.
                 */
                tdata->tjmax = adjust_tjmax(c, tdata->cpu, dev);
        }
        return tdata->tjmax;
}
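
/*
 * get_ttarget() derives the "max" threshold from IA32_TEMPERATURE_TARGET:
 * TjMax in bits 23:16 minus the undocumented offset in bits 8:15, returned
 * in millidegrees Celsius.
 */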

static int get_ttarget(struct temp_data *tdata, struct device *dev)
{
        u32 eax, edx;
        int tjmax, ttarget_offset, ret;

        /*
         * ttarget is valid only if tjmax can be retrieved from
         * MSR_IA32_TEMPERATURE_TARGET
         */
        if (tdata->tjmax)
                return -ENODEV;

        ret = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (ret)
                return ret;

        tjmax = (eax >> 16) & 0xff;

        /* Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. */
        ttarget_offset = (eax >> 8) & 0xff;

        return (tjmax - ttarget_offset) * 1000;
}

/* Keep track of how many zone pointers we allocated in init() */
static int max_zones __read_mostly;
/* Array of zone pointers. Serialized by cpu hotplug lock */
static struct platform_device **zone_devices;
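
/*
 * Sysfs show() callbacks. Each callback recovers its temp_data with
 * container_of() on the matching sd_attrs[] entry, so no per-attribute
 * drvdata is needed.
 */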

static ssize_t show_label(struct device *dev,
                          struct device_attribute *devattr, char *buf)
{
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_LABEL]);

        if (is_pkg_temp_data(tdata))
                return sprintf(buf, "Package id %u\n", pdata->pkg_id);

        return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

static ssize_t show_crit_alarm(struct device *dev,
                               struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct temp_data *tdata = container_of(devattr, struct temp_data,
                                                sd_attrs[ATTR_CRIT_ALARM]);

        mutex_lock(&tdata->update_lock);
        rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
        mutex_unlock(&tdata->update_lock);

        return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
                          struct device_attribute *devattr, char *buf)
{
        struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TJMAX]);
        int tjmax;

        mutex_lock(&tdata->update_lock);
        tjmax = get_tjmax(tdata, dev);
        mutex_unlock(&tdata->update_lock);

        return sprintf(buf, "%d\n", tjmax);
}

static ssize_t show_ttarget(struct device *dev,
                            struct device_attribute *devattr, char *buf)
{
        struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TTARGET]);
        int ttarget;

        mutex_lock(&tdata->update_lock);
        ttarget = get_ttarget(tdata, dev);
        mutex_unlock(&tdata->update_lock);

        if (ttarget < 0)
                return ttarget;
        return sprintf(buf, "%d\n", ttarget);
}

static ssize_t show_temp(struct device *dev,
                         struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TEMP]);
        int tjmax;

        mutex_lock(&tdata->update_lock);

        tjmax = get_tjmax(tdata, dev);
        /* Check whether the time interval has elapsed */
        if (time_after(jiffies, tdata->last_updated + HZ)) {
                rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
                /*
                 * Ignore the valid bit. In all observed cases the register
                 * value is either low or zero if the valid bit is 0.
                 * Return it instead of reporting an error which doesn't
                 * really help at all.
                 */
                tdata->temp = tjmax - ((eax >> 16) & 0xff) * 1000;
                tdata->last_updated = jiffies;
        }

        mutex_unlock(&tdata->update_lock);
        return sprintf(buf, "%d\n", tdata->temp);
}

static int create_core_attrs(struct temp_data *tdata, struct device *dev)
{
        int i;
        static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
                        struct device_attribute *devattr, char *buf) = {
                        show_label, show_crit_alarm, show_temp, show_tjmax,
                        show_ttarget };
        static const char *const suffixes[TOTAL_ATTRS] = {
                "label", "crit_alarm", "input", "crit", "max"
        };

        for (i = 0; i < tdata->attr_size; i++) {
                /*
                 * We map the attr number to core id of the CPU
                 * The attr number is always core id + 2
                 * The Pkgtemp will always show up as temp1_*, if available
                 */
                int attr_no = is_pkg_temp_data(tdata) ? 1 : tdata->cpu_core_id + 2;

                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
                         "temp%d_%s", attr_no, suffixes[i]);
                sysfs_attr_init(&tdata->sd_attrs[i].attr);
                tdata->sd_attrs[i].attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].attr.mode = 0444;
                tdata->sd_attrs[i].show = rd_ptr[i];
                tdata->attrs[i] = &tdata->sd_attrs[i].attr;
        }
        tdata->attr_group.attrs = tdata->attrs;
        return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}
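
/*
 * Resulting sysfs layout: the package sensor is always temp1_*, a core with
 * cpu_core_id N is temp<N+2>_*, each with label, crit_alarm, input, crit and,
 * when the target temperature is readable, max. Example (hwmon index and
 * values are hypothetical):
 *
 *   $ cat /sys/class/hwmon/hwmon2/temp1_label
 *   Package id 0
 *   $ cat /sys/class/hwmon/hwmon2/temp2_input
 *   34000
 */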

static int chk_ucode_version(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Check if we have problem with errata AE18 of Core processors:
         * Readings might stop update when processor visited too deep sleep,
         * fixed for stepping D0 (6EC).
         */
        if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
                pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                return -ENODEV;
        }
        return 0;
}

static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
        int id = topology_logical_die_id(cpu);

        if (id >= 0 && id < max_zones)
                return zone_devices[id];

        return NULL;
}

static struct temp_data *
init_temp_data(struct platform_data *pdata, unsigned int cpu, int pkg_flag)
{
        struct temp_data *tdata;

        if (!pdata->core_data) {
                /*
                 * The information of actual possible cores in a package is broken for now.
                 * Will replace hardcoded NUM_REAL_CORES with actual per package core count
                 * when this information becomes available.
                 */
                pdata->nr_cores = NUM_REAL_CORES;
                pdata->core_data = kcalloc(pdata->nr_cores, sizeof(struct temp_data *),
                                           GFP_KERNEL);
                if (!pdata->core_data)
                        return NULL;
        }

        tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
        if (!tdata)
                return NULL;

        if (pkg_flag) {
                pdata->pkg_data = tdata;
                /* Use tdata->index as indicator of package temp data */
                tdata->index = -1;
        } else {
                tdata->index = ida_alloc_max(&pdata->ida, pdata->nr_cores - 1, GFP_KERNEL);
                if (tdata->index < 0) {
                        kfree(tdata);
                        return NULL;
                }
                pdata->core_data[tdata->index] = tdata;
        }

        tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
                                       MSR_IA32_THERM_STATUS;
        tdata->cpu = cpu;
        tdata->cpu_core_id = topology_core_id(cpu);
        tdata->attr_size = MAX_CORE_ATTRS;
        mutex_init(&tdata->update_lock);
        return tdata;
}

static void destroy_temp_data(struct platform_data *pdata, struct temp_data *tdata)
{
        if (is_pkg_temp_data(tdata)) {
                pdata->pkg_data = NULL;
                kfree(pdata->core_data);
                pdata->core_data = NULL;
        } else {
                pdata->core_data[tdata->index] = NULL;
                ida_free(&pdata->ida, tdata->index);
        }
        kfree(tdata);
}

static struct temp_data *get_temp_data(struct platform_data *pdata, int cpu)
{
        int i;

        /* cpu < 0 means get pkg temp_data */
        if (cpu < 0)
                return pdata->pkg_data;

        for (i = 0; i < pdata->nr_cores; i++) {
                if (pdata->core_data[i] &&
                    pdata->core_data[i]->cpu_core_id == topology_core_id(cpu))
                        return pdata->core_data[i];
        }
        return NULL;
}

static int create_core_data(struct platform_device *pdev, unsigned int cpu,
                            int pkg_flag)
{
        struct temp_data *tdata;
        struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
        int err;

        if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
                return 0;

        tdata = init_temp_data(pdata, cpu, pkg_flag);
        if (!tdata)
                return -ENOMEM;

        /* Test if we can access the status register */
        err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
        if (err)
                goto exit_free;

        /* Make sure tdata->tjmax is a valid indicator for dynamic/static tjmax */
        get_tjmax(tdata, &pdev->dev);

        /*
         * The target temperature is available on older CPUs but not in the
         * MSR_IA32_TEMPERATURE_TARGET register. Atoms don't have the register
         * at all.
         */
        if (c->x86_model > 0xe && c->x86_model != 0x1c)
                if (get_ttarget(tdata, &pdev->dev) >= 0)
                        tdata->attr_size++;

        /* Create sysfs interfaces */
        err = create_core_attrs(tdata, pdata->hwmon_dev);
        if (err)
                goto exit_free;

        return 0;

exit_free:
        destroy_temp_data(pdata, tdata);
        return err;
}

static void
coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
        if (create_core_data(pdev, cpu, pkg_flag))
                dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata, struct temp_data *tdata)
{
        /* if we errored on add then this is already gone */
        if (!tdata)
                return;

        /* Remove the sysfs attributes */
        sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);

        destroy_temp_data(pdata, tdata);
}

static int coretemp_device_add(int zoneid)
{
        struct platform_device *pdev;
        struct platform_data *pdata;
        int err;

        /* Initialize the per-zone data structures */
        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        pdata->pkg_id = zoneid;
        ida_init(&pdata->ida);

        pdev = platform_device_alloc(DRVNAME, zoneid);
        if (!pdev) {
                err = -ENOMEM;
                goto err_free_pdata;
        }

        err = platform_device_add(pdev);
        if (err)
                goto err_put_dev;

        platform_set_drvdata(pdev, pdata);
        zone_devices[zoneid] = pdev;
        return 0;

err_put_dev:
        platform_device_put(pdev);
err_free_pdata:
        kfree(pdata);
        return err;
}

static void coretemp_device_remove(int zoneid)
{
        struct platform_device *pdev = zone_devices[zoneid];
        struct platform_data *pdata = platform_get_drvdata(pdev);

        ida_destroy(&pdata->ida);
        kfree(pdata);
        platform_device_unregister(pdev);
}
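
/*
 * CPU hotplug callbacks: the hwmon device for a zone is registered when the
 * first CPU of that zone comes online and unregistered again when the last
 * CPU of the zone goes offline.
 */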

static int coretemp_cpu_online(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct platform_data *pdata;

        /*
         * Don't execute this on resume as the offline callback did
         * not get executed on suspend.
         */
        if (cpuhp_tasks_frozen)
                return 0;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
        if (!cpu_has(c, X86_FEATURE_DTHERM))
                return -ENODEV;

        pdata = platform_get_drvdata(pdev);
        if (!pdata->hwmon_dev) {
                struct device *hwmon;

                /* Check the microcode version of the CPU */
                if (chk_ucode_version(cpu))
                        return -EINVAL;

                /*
                 * Alright, we have DTS support.
                 * We are bringing the _first_ core in this pkg
                 * online. So, initialize per-pkg data structures and
                 * then bring this core online.
                 */
                hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
                                                          pdata, NULL);
                if (IS_ERR(hwmon))
                        return PTR_ERR(hwmon);
                pdata->hwmon_dev = hwmon;

                /*
                 * Check whether pkgtemp support is available.
                 * If so, add interfaces for pkgtemp.
                 */
                if (cpu_has(c, X86_FEATURE_PTS))
                        coretemp_add_core(pdev, cpu, 1);
        }

        /*
         * Check whether a thread sibling is already online. If not add the
         * interface for this CPU core.
         */
        if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
                coretemp_add_core(pdev, cpu, 0);

        cpumask_set_cpu(cpu, &pdata->cpumask);
        return 0;
}

static int coretemp_cpu_offline(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct platform_data *pd;
        struct temp_data *tdata;
        int target;

        /* No need to tear down any interfaces for suspend */
        if (cpuhp_tasks_frozen)
                return 0;

        /* If the physical CPU device does not exist, just return */
        pd = platform_get_drvdata(pdev);
        if (!pd->hwmon_dev)
                return 0;

        tdata = get_temp_data(pd, cpu);

        cpumask_clear_cpu(cpu, &pd->cpumask);

        /*
         * If this is the last thread sibling, remove the CPU core
         * interface. If there is still a sibling online, transfer the
         * target cpu of that core interface to it.
         */
        target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
        if (target >= nr_cpu_ids) {
                coretemp_remove_core(pd, tdata);
        } else if (tdata && tdata->cpu == cpu) {
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }

        /*
         * If all cores in this pkg are offline, remove the interface.
         */
        tdata = get_temp_data(pd, -1);
        if (cpumask_empty(&pd->cpumask)) {
                if (tdata)
                        coretemp_remove_core(pd, tdata);
                hwmon_device_unregister(pd->hwmon_dev);
                pd->hwmon_dev = NULL;
                return 0;
        }

        /*
         * Check whether this core is the target for the package
         * interface. We need to assign it to some other cpu.
         */
        if (tdata && tdata->cpu == cpu) {
                target = cpumask_first(&pd->cpumask);
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }
        return 0;
}

static const struct x86_cpu_id __initconst coretemp_ids[] = {
        X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

static enum cpuhp_state coretemp_hp_online;

static int __init coretemp_init(void)
{
        int i, err;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
        if (!x86_match_cpu(coretemp_ids))
                return -ENODEV;

        max_zones = topology_max_packages() * topology_max_dies_per_package();
        zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
                               GFP_KERNEL);
        if (!zone_devices)
                return -ENOMEM;

        for (i = 0; i < max_zones; i++) {
                err = coretemp_device_add(i);
                if (err)
                        goto outzone;
        }

        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
                                coretemp_cpu_online, coretemp_cpu_offline);
        if (err < 0)
                goto outzone;
        coretemp_hp_online = err;
        return 0;

outzone:
        while (i--)
                coretemp_device_remove(i);
        kfree(zone_devices);
        return err;
}
module_init(coretemp_init)

static void __exit coretemp_exit(void)
{
        int i;

        cpuhp_remove_state(coretemp_hp_online);
        for (i = 0; i < max_zones; i++)
                coretemp_device_remove(i);
        kfree(zone_devices);
}
module_exit(coretemp_exit)

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");