/*
 * linux/arch/arm/plat-omap/dmtimer.c
 *
 * OMAP Dual-Mode Timers
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
 * Thara Gopinath <thara@ti.com>
 *
 * dmtimer adaptation to platform_driver.
 *
 * Copyright (C) 2005 Nokia Corporation
 * OMAP2 support by Juha Yrjola
 * API improvements and OMAP2 clock framework support by Timo Teras
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

#include <plat/dmtimer.h>

static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);

/**
 * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
 * @timer: timer pointer over which read operation to perform
 * @reg: lowest byte holds the register offset
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked. Otherwise a read of a non-completed write
 * will produce an error.
 */
static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
{
	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
	return __omap_dm_timer_read(timer, reg, timer->posted);
}

/**
 * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
 * @timer: timer pointer over which write operation is to perform
 * @reg: lowest byte holds the register offset
 * @value: data to write into the register
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked. Otherwise a write on a register which has a
 * pending write will be lost.
 */
static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
						u32 value)
{
	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
	__omap_dm_timer_write(timer, reg, value, timer->posted);
}

static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
	omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_OFFSET,
				timer->context.tiocp_cfg);
	if (timer->revision > 1)
		__raw_writel(timer->context.tistat, timer->sys_stat);

	__raw_writel(timer->context.tisr, timer->irq_stat);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
				timer->context.twer);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
				timer->context.tcrr);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
				timer->context.tldr);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
				timer->context.tmar);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
				timer->context.tsicr);
	__raw_writel(timer->context.tier, timer->irq_ena);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
				timer->context.tclr);
}

static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
{
	int c;

	if (!timer->sys_stat)
		return;

	c = 0;
	while (!(__raw_readl(timer->sys_stat) & 1)) {
		c++;
		if (c > 100000) {
			printk(KERN_ERR "Timer failed to reset\n");
			return;
		}
	}
}

static void omap_dm_timer_reset(struct omap_dm_timer *timer)
{
	omap_dm_timer_enable(timer);
	if (timer->pdev->id != 1) {
		omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
		omap_dm_timer_wait_for_reset(timer);
	}

	__omap_dm_timer_reset(timer, 0, 0);
	omap_dm_timer_disable(timer);
	timer->posted = 1;
}

int omap_dm_timer_prepare(struct omap_dm_timer *timer)
{
	struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
	int ret;

	timer->fclk = clk_get(&timer->pdev->dev, "fck");
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
		timer->fclk = NULL;
		dev_err(&timer->pdev->dev, ": No fclk handle.\n");
		return -EINVAL;
	}

	if (pdata->needs_manual_reset)
		omap_dm_timer_reset(timer);

	ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);

	timer->posted = 1;
	return ret;
}

struct omap_dm_timer *omap_dm_timer_request(void)
{
	struct omap_dm_timer *timer = NULL, *t;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(t, &omap_timer_list, node) {
		if (t->reserved)
			continue;

		timer = t;
		timer->reserved = 1;
		break;
	}

	if (timer) {
		ret = omap_dm_timer_prepare(timer);
		if (ret) {
			timer->reserved = 0;
			timer = NULL;
		}
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	if (!timer)
		pr_debug("%s: timer request failed!\n", __func__);

	return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request);

struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
	struct omap_dm_timer *timer = NULL, *t;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(t, &omap_timer_list, node) {
		if (t->pdev->id == id && !t->reserved) {
			timer = t;
			timer->reserved = 1;
			break;
		}
	}

	if (timer) {
		ret = omap_dm_timer_prepare(timer);
		if (ret) {
			timer->reserved = 0;
			timer = NULL;
		}
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	if (!timer)
		pr_debug("%s: timer%d request failed!\n", __func__, id);

	return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);

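/*
 * Illustrative usage sketch (editor-added, not part of the original file):
 * how a client is expected to use the request/free API above. The specific
 * timer id (10) is a placeholder assumption for the example only.
 *
 *	struct omap_dm_timer *gpt = omap_dm_timer_request_specific(10);
 *
 *	if (!gpt)
 *		gpt = omap_dm_timer_request();	(fall back to any free timer)
 *	if (gpt) {
 *		int irq = omap_dm_timer_get_irq(gpt);
 *		(program and start the timer, request_irq() on irq, then:)
 *		omap_dm_timer_free(gpt);
 *	}
 */
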
int omap_dm_timer_free(struct omap_dm_timer *timer)
{
	if (unlikely(!timer))
		return -EINVAL;

	clk_put(timer->fclk);

	WARN_ON(!timer->reserved);
	timer->reserved = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_free);

void omap_dm_timer_enable(struct omap_dm_timer *timer)
{
	pm_runtime_get_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_enable);

void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
	pm_runtime_put(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);

int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
{
	if (timer)
		return timer->irq;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);

#if defined(CONFIG_ARCH_OMAP1)

/**
 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
 * @inputmask: current value of idlect mask
 */
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
	int i = 0;
	struct omap_dm_timer *timer = NULL;
	unsigned long flags;

	/* If ARMXOR cannot be idled this function call is unnecessary */
	if (!(inputmask & (1 << 1)))
		return inputmask;

	/* If any active timer is using ARMXOR return modified mask */
	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(timer, &omap_timer_list, node) {
		u32 l;

		l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
		if (l & OMAP_TIMER_CTRL_ST) {
			if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
				inputmask &= ~(1 << 1);
			else
				inputmask &= ~(1 << 2);
		}
		i++;
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	return inputmask;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);

#else

struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
	if (timer)
		return timer->fclk;
	return NULL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);

__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
	BUG();

	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);

#endif

int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return -EINVAL;
	}

	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);

int omap_dm_timer_start(struct omap_dm_timer *timer)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);

	if (timer->loses_context) {
		u32 ctx_loss_cnt_after =
			timer->get_context_loss_count(&timer->pdev->dev);
		if (ctx_loss_cnt_after != timer->ctx_loss_count)
			omap_timer_restore_context(timer);
	}

	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (!(l & OMAP_TIMER_CTRL_ST)) {
		l |= OMAP_TIMER_CTRL_ST;
		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	}

	/* Save the context */
	timer->context.tclr = l;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_start);

int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
	unsigned long rate = 0;
	struct dmtimer_platform_data *pdata;

	if (unlikely(!timer))
		return -EINVAL;

	/* Only dereference the timer after it is known to be valid. */
	pdata = timer->pdev->dev.platform_data;

	if (!pdata->needs_manual_reset)
		rate = clk_get_rate(timer->fclk);

	__omap_dm_timer_stop(timer, timer->posted, rate);

	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);

int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
	int ret;
	struct dmtimer_platform_data *pdata;

	if (unlikely(!timer))
		return -EINVAL;

	pdata = timer->pdev->dev.platform_data;

	if (source < 0 || source >= 3)
		return -EINVAL;

	ret = pdata->set_timer_src(timer->pdev, source);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);

int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
			    unsigned int load)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (autoreload)
		l |= OMAP_TIMER_CTRL_AR;
	else
		l &= ~OMAP_TIMER_CTRL_AR;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);

	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
	/* Save the context */
	timer->context.tclr = l;
	timer->context.tldr = load;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);

/* Optimized set_load which removes costly spin wait in timer_start */
int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
				  unsigned int load)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);

	if (timer->loses_context) {
		u32 ctx_loss_cnt_after =
			timer->get_context_loss_count(&timer->pdev->dev);
		if (ctx_loss_cnt_after != timer->ctx_loss_count)
			omap_timer_restore_context(timer);
	}

	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (autoreload) {
		l |= OMAP_TIMER_CTRL_AR;
		omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
	} else {
		l &= ~OMAP_TIMER_CTRL_AR;
	}
	l |= OMAP_TIMER_CTRL_ST;

	__omap_dm_timer_load_start(timer, l, load, timer->posted);

	/* Save the context */
	timer->context.tclr = l;
	timer->context.tldr = load;
	timer->context.tcrr = load;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);

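/*
 * Illustrative sketch (editor-added): arming a periodic tick through the
 * optimized path above. The 32 kHz source and the ~1 ms period (32 counts
 * of a 32768 Hz clock) are assumptions chosen for the example only;
 * OMAP_TIMER_INT_OVERFLOW comes from <plat/dmtimer.h>.
 *
 *	omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_32_KHZ);
 *	omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
 *	omap_dm_timer_set_load_start(gpt, 1, 0xffffffff - 32);
 */
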
int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
			     unsigned int match)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (enable)
		l |= OMAP_TIMER_CTRL_CE;
	else
		l &= ~OMAP_TIMER_CTRL_CE;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);

	/* Save the context */
	timer->context.tclr = l;
	timer->context.tmar = match;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);

int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
			   int toggle, int trigger)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
	       OMAP_TIMER_CTRL_PT | (0x03 << 10));
	if (def_on)
		l |= OMAP_TIMER_CTRL_SCPWM;
	if (toggle)
		l |= OMAP_TIMER_CTRL_PT;
	l |= trigger << 10;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);

	/* Save the context */
	timer->context.tclr = l;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);

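/*
 * Illustrative sketch (editor-added): driving a PWM output with the helper
 * above. The period/duty counter values are assumptions for the example;
 * OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE (from <plat/dmtimer.h>) selects
 * toggling on both overflow and compare-match events.
 *
 *	omap_dm_timer_set_load(gpt, 1, 0xffffffff - period);
 *	omap_dm_timer_set_match(gpt, 1, 0xffffffff - duty);
 *	omap_dm_timer_set_pwm(gpt, 0, 1,
 *			OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 *	omap_dm_timer_start(gpt);
 */
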
int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
	if (prescaler >= 0x00 && prescaler <= 0x07) {
		l |= OMAP_TIMER_CTRL_PRE;
		l |= prescaler << 2;
	}
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);

	/* Save the context */
	timer->context.tclr = l;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);

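/*
 * Editor-added note: with the PRE bit set, the PTV field programmed above
 * divides the timer functional clock by 2^(prescaler + 1), so (assuming a
 * previously requested timer "gpt"):
 *
 *	omap_dm_timer_set_prescaler(gpt, 0);	divides fclk by 2
 *	omap_dm_timer_set_prescaler(gpt, 7);	divides fclk by 256
 *	omap_dm_timer_set_prescaler(gpt, -1);	disables the prescaler
 *
 * Any value outside 0..7 leaves PRE cleared, matching the range check in
 * the function above.
 */
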
int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
				  unsigned int value)
{
	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	__omap_dm_timer_int_enable(timer, value);

	/* Save the context */
	timer->context.tier = value;
	timer->context.twer = value;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);

unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
	unsigned int l;

	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return 0;
	}

	l = __raw_readl(timer->irq_stat);

	return l;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);

int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
		return -EINVAL;

	__omap_dm_timer_write_status(timer, value);
	/* Save the context */
	timer->context.tisr = value;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);

unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return 0;
	}

	return __omap_dm_timer_read_counter(timer, timer->posted);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);

int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return -EINVAL;
	}

	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);

	/* Save the context */
	timer->context.tcrr = value;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);

int omap_dm_timers_active(void)
{
	struct omap_dm_timer *timer;

	list_for_each_entry(timer, &omap_timer_list, node) {
		if (!timer->reserved)
			continue;

		if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
		    OMAP_TIMER_CTRL_ST) {
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);

/**
 * omap_dm_timer_probe - probe function called for every registered device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework at the end of device registration for all
 * timer devices.
 */
static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
	int ret;
	unsigned long flags;
	struct omap_dm_timer *timer;
	struct resource *mem, *irq, *ioarea;
	struct dmtimer_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!irq)) {
		dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
		return -ENODEV;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!mem)) {
		dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
		return -ENODEV;
	}

	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
		return -EBUSY;
	}

	timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
	if (!timer) {
		dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
			__func__);
		ret = -ENOMEM;
		goto err_free_ioregion;
	}

	timer->io_base = ioremap(mem->start, resource_size(mem));
	if (!timer->io_base) {
		dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
		ret = -ENOMEM;
		goto err_free_mem;
	}

	timer->id = pdev->id;
	timer->irq = irq->start;
	timer->reserved = pdata->reserved;
	timer->pdev = pdev;
	timer->loses_context = pdata->loses_context;
	timer->get_context_loss_count = pdata->get_context_loss_count;

	/* Skip pm_runtime_enable for OMAP1 */
	if (!pdata->needs_manual_reset) {
		pm_runtime_enable(&pdev->dev);
		pm_runtime_irq_safe(&pdev->dev);
	}

	if (!timer->reserved) {
		pm_runtime_get_sync(&pdev->dev);
		__omap_dm_timer_init_regs(timer);
		pm_runtime_put(&pdev->dev);
	}

	/* add the timer element to the list */
	spin_lock_irqsave(&dm_timer_lock, flags);
	list_add_tail(&timer->node, &omap_timer_list);
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	dev_dbg(&pdev->dev, "Device Probed.\n");

	return 0;

err_free_mem:
	kfree(timer);

err_free_ioregion:
	release_mem_region(mem->start, resource_size(mem));

	return ret;
}

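/*
 * Illustrative sketch (editor-added, not part of the original file): the
 * shape of the platform data this probe consumes, restricted to the fields
 * referenced in this file. The helper names and values are hypothetical
 * placeholders for the example only.
 *
 *	static struct dmtimer_platform_data gptimer_pdata = {
 *		.set_timer_src		= board_dm_timer_set_src,
 *		.needs_manual_reset	= 0,
 *		.reserved		= 0,
 *		.loses_context		= 1,
 *		.get_context_loss_count	= board_context_loss_count,
 *	};
 */
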
/**
 * omap_dm_timer_remove - cleanup a registered timer device
 * @pdev: pointer to current timer platform device
 *
 * Called by driver framework whenever a timer device is unregistered.
 * In addition to freeing platform resources it also deletes the timer
 * entry from the local list.
 */
static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
{
	struct omap_dm_timer *timer;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(timer, &omap_timer_list, node)
		if (timer->pdev->id == pdev->id) {
			list_del(&timer->node);
			kfree(timer);
			ret = 0;
			break;
		}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	return ret;
}

static struct platform_driver omap_dm_timer_driver = {
	.probe  = omap_dm_timer_probe,
	.remove = __devexit_p(omap_dm_timer_remove),
	.driver = {
		.name   = "omap_timer",
	},
};

static int __init omap_dm_timer_driver_init(void)
{
	return platform_driver_register(&omap_dm_timer_driver);
}

static void __exit omap_dm_timer_driver_exit(void)
{
	platform_driver_unregister(&omap_dm_timer_driver);
}

early_platform_init("earlytimer", &omap_dm_timer_driver);
module_init(omap_dm_timer_driver_init);
module_exit(omap_dm_timer_driver_exit);

MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");