drivers/thermal/samsung/exynos_tmu.c
/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_common: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: work structure queued from the TMU interrupt handler.
 * @lock: mutex serializing access to the TMU registers.
 * @clk: pointer to the clock structure.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_common;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};
/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	if (pdata->cal_mode == HW_MODE)
		return temp;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp should range between 25 and 125 */
		if (temp < 25 || temp > 125) {
			temp_code = -EINVAL;
			goto out;
		}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}
out:
	return temp_code;
}
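
/*
 * Illustrative example of the two-point trimming conversion above.  The
 * fuse values are hypothetical and assume the usual trim points of 25
 * and 85 degrees Celsius: with temp_error1 = 0x32 (50, code fused at 25
 * degrees) and temp_error2 = 0x96 (150, code fused at 85 degrees),
 * converting temp = 55 gives
 *	(55 - 25) * (150 - 50) / (85 - 25) + 50 = 100,
 * i.e. a temperature half way between the trim points maps to a code
 * half way between the two fused values.
 */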
/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	if (pdata->cal_mode == HW_MODE)
		return temp_code;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp_code should range between 75 and 175 */
		if (temp_code < 75 || temp_code > 175) {
			temp = -ENODATA;
			goto out;
		}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}
out:
	return temp;
}
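
/*
 * Program the TMU hardware: read (or, for out-of-range fuses, substitute)
 * the trimming info used for calibration, convert the configured trigger
 * temperatures to codes and write them to the rising/falling threshold
 * registers, and clear any stale interrupt status.  Called from probe and
 * again on resume so the registers are reprogrammed after a suspend.
 */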
static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i, trigger_levs = 0;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + reg->tmu_status);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
		__raw_writel(1, data->base + reg->triminfo_ctrl);

	if (pdata->cal_mode == HW_MODE)
		goto skip_calib_data;

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * For exynos5440 soc triminfo value is swapped between TMU0
		 * and TMU2, so the below logic is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
				EXYNOS5440_EFUSE_SWAP_OFFSET +
				reg->triminfo_data);
			break;
		case 1:
			trim_info = readl(data->base + reg->triminfo_data);
			break;
		case 2:
			trim_info = readl(data->base -
				EXYNOS5440_EFUSE_SWAP_OFFSET +
				reg->triminfo_data);
		}
	} else {
		trim_info = readl(data->base + reg->triminfo_data);
	}
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> reg->triminfo_85_shift) &
			EXYNOS_TMU_TEMP_MASK;

skip_calib_data:
	if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
		dev_err(&pdev->dev, "Invalid max trigger level\n");
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < pdata->max_trigger_level; i++) {
		if (!pdata->trigger_levels[i])
			continue;

		if ((pdata->trigger_type[i] == HW_TRIP) &&
		    (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
			dev_err(&pdev->dev, "Invalid hw trigger level\n");
			ret = -EINVAL;
			goto out;
		}

		/* Count trigger levels except the HW trip */
		if (!(pdata->trigger_type[i] == HW_TRIP))
			trigger_levs++;
	}

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		writeb(threshold_code,
			data->base + reg->threshold_temp);
		for (i = 0; i < trigger_levs; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0;
		     i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			rising_threshold |= threshold_code << 8 * i;
			if (pdata->threshold_falling) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				if (threshold_code > 0)
					falling_threshold |=
						threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		writel((reg->inten_rise_mask << reg->inten_rise_shift) |
			(reg->inten_fall_mask << reg->inten_fall_shift),
				data->base + reg->tmu_intclear);

		/* if last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + reg->threshold_th0);
			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << reg->threshold_th3_l0_shift;
				writel(rising_threshold,
					data->base + reg->threshold_th2);
			}
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << reg->therm_trip_en_shift);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/* Clear the PMIN in the common TMU register */
	if (reg->tmu_pmin && !data->id)
		writel(0, data->base_common + reg->tmu_pmin);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}
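
/*
 * Enable or disable the TMU core.  On enable, program the reference
 * voltage, gain and noise-cancel (trip mode) fields of the control
 * register and unmask the configured rising (and, where supported,
 * falling) trip interrupts; on disable, clear the core enable bit and
 * mask all interrupts.
 */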
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en, cal_val;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	if (pdata->test_mux)
		con |= (pdata->test_mux << reg->test_mux_addr_shift);

	if (pdata->reference_voltage) {
		con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
		con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
	}

	if (pdata->gain) {
		con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
		con |= (pdata->gain << reg->buf_slope_sel_shift);
	}

	if (pdata->noise_cancel_mode) {
		con &= ~(reg->therm_trip_mode_mask <<
					reg->therm_trip_mode_shift);
		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
	}

	if (pdata->cal_mode == HW_MODE) {
		con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
		cal_val = 0;
		switch (pdata->cal_type) {
		case TYPE_TWO_POINT_TRIMMING:
			cal_val = 3;
			break;
		case TYPE_ONE_POINT_TRIMMING_85:
			cal_val = 2;
			break;
		case TYPE_ONE_POINT_TRIMMING_25:
			cal_val = 1;
			break;
		case TYPE_NONE:
			break;
		default:
			dev_err(&pdev->dev, "Invalid calibration type, using none\n");
		}
		con |= cal_val << reg->calib_mode_shift;
	}

	if (on) {
		con |= (1 << reg->core_en_shift);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << reg->core_en_shift);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}
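
/* Read the current temperature code and convert it to degrees Celsius. */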
static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	u8 temp_code;
	int temp;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	temp_code = readb(data->base + reg->tmu_cur_temp);
	temp = code_to_temp(data, temp_code);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return temp;
}
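
/*
 * Thermal emulation support: when CONFIG_THERMAL_EMULATION is set, a
 * non-zero temperature (in millicelsius) is converted to a code and
 * written to the emulation register, so the TMU reports that value
 * instead of the sensed one; a zero temperature disables emulation.
 */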
#ifdef CONFIG_THERMAL_EMULATION
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
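
/*
 * Interrupt handling is split in two: the hard IRQ handler below only
 * disables the interrupt line and schedules irq_work, while the work
 * function reports the trigger to the core thermal layer, clears the
 * interrupt status and re-enables the line.
 */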
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val_irq, val_type;

	/* Find which sensor generated this interrupt */
	if (reg->tmu_irqstatus) {
		val_type = readl(data->base_common + reg->tmu_irqstatus);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	val_irq = readl(data->base + reg->tmu_intstat);
	/* clear the interrupts */
	writel(val_irq, data->base + reg->tmu_intclear);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}
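
/*
 * Matched against the TMU node in the device tree.  A minimal node is
 * sketched below for illustration only; addresses, interrupt specifiers
 * and clock indices are examples, not a binding reference:
 *
 *	tmu@100c0000 {
 *		compatible = "samsung,exynos4412-tmu";
 *		reg = <0x100C0000 0x100>;
 *		interrupts = <0 216 0>;
 *		clocks = <&clock 383>;
 *		clock-names = "tmu_apbif";
 *		vtmu-supply = <&ldo10_reg>;
 *	};
 */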
static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
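
/*
 * Look up the per-SoC init data attached to the matched compatible
 * string and return the platform data slice for this TMU instance.
 */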
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}
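
/*
 * Gather everything the driver needs from the device tree: the optional
 * vtmu regulator, the instance id (from the "tmuctrl" alias), the
 * interrupt, the instance register window and, for SoCs with shared
 * registers, the common register window.
 */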
static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_common = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_common) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}
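
/*
 * Probe: map DT resources, prepare the clock, initialize and enable the
 * TMU, then describe the sensor (trip points and cooling/frequency
 * table) to the exynos core thermal layer and install the interrupt
 * handler.
 */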
static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret)
		return ret;

	if (pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5440)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		dev_err(&pdev->dev, "Failed to allocate registration struct\n");
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
	return ret;
}
static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_tmu_control(pdev, false);

	exynos_unregister_thermal(data->reg_conf);

	clk_unprepare(data->clk);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}
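
/*
 * On suspend the TMU is simply switched off; on resume it is fully
 * re-initialized before being switched back on, so the threshold and
 * calibration registers are reprogrammed.
 */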
#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");