/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>

#include <asm/irq.h>
#include <mach/hardware.h>

#include "../w1.h"
#include "../w1_int.h"
#define MOD_NAME	"OMAP_HDQ:"
#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;
struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};
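/*
 * Illustrative sketch (not part of the original driver): the init_trans
 * counter above tracks the two-stage HDQ transaction described in the
 * comment.  Seen through the w1 callbacks defined later in this file, a
 * register access looks roughly like this (hdq, reg_addr, reg_val and val
 * are placeholder names):
 *
 *	omap_w1_write_byte(hdq, reg_addr);	// CMD|REG address, init_trans = 1
 *	omap_w1_write_byte(hdq, reg_val);	// data write, module released
 * or
 *	omap_w1_write_byte(hdq, reg_addr);	// CMD|REG address
 *	val = omap_w1_read_byte(hdq);		// data read, module released
 */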
static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);
static struct platform_driver omap_hdq_driver = {
	.probe	= omap_hdq_probe,
	.remove	= omap_hdq_remove,
	.driver	= {
		.name = "omap_hdq",
	},
};
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};
/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}
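/*
 * Usage sketch (illustrative, mirroring the call sites later in this file):
 * hdq_reg_merge() is a read-modify-write helper -- the bits selected by
 * 'mask' are replaced by the corresponding bits of 'val', all other bits are
 * preserved.  For example, starting a transmit while forcing the direction
 * bit to "write":
 *
 *	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
 *		      OMAP_HDQ_CTRL_STATUS_GO,
 *		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
 */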
/*
 * Wait for one or more bits in flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}
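/*
 * Usage sketch (illustrative, matching the call sites below): poll until the
 * GO bit drops, with the final register value returned through *status:
 *
 *	u8 tmp_status;
 *	int err = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
 *				    OMAP_HDQ_CTRL_STATUS_GO,
 *				    OMAP_HDQ_FLAG_CLEAR, &tmp_status);
 *	if (err == -ETIMEDOUT)
 *		;	// GO did not clear within OMAP_HDQ_TIMEOUT
 */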
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for TXCOMPLETE/RXCOMPLETE, %x",
			*status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for GO bit to return to zero, %x",
			tmp_status);

out:
	return ret;
}
/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}
/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}
/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not fully obey the 1-Wire spec, so calculate the CRC
	 * based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}
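/*
 * Illustrative note (not in the original source): the synthesised 64-bit ROM
 * id carries the CRC8 of the seven low id bytes in bits 63..56 and the module
 * id in the low bits, so a given w1_id always maps to the same single slave:
 *
 *	bits 63..56:	w1_calc_crc8() over the 7 low bytes of rn_le
 *	bits 55..0:	module_id (w1_id module parameter, default 1)
 */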
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both INIT and GO bits to return to zero.
	 * Zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for INIT & GO bits to return to zero, %x",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
			(hdq_data->hdq_irqstatus
				& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
			OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev,
				"timeout waiting for RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev,
			"attempt to decrement use count when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}
/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}
/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}
static int __devinit omap_hdq_probe(struct platform_device *pdev)
{
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_kmalloc;
	}

	hdq_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		ret = -ENXIO;
		goto err_resource;
	}

	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
	if (!hdq_data->hdq_base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	iounmap(hdq_data->hdq_base);

err_ioremap:
err_resource:
	platform_set_drvdata(pdev, NULL);
	kfree(hdq_data);

err_kmalloc:
	return ret;
}
static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);
	free_irq(INT_24XX_HDQ_IRQ, hdq_data);
	platform_set_drvdata(pdev, NULL);
	iounmap(hdq_data->hdq_base);
	kfree(hdq_data);
	w1_remove_master_device(&omap_w1_master);

	return 0;
}
static int __init omap_hdq_init(void)
{
	return platform_driver_register(&omap_hdq_driver);
}
module_init(omap_hdq_init);

static void __exit omap_hdq_exit(void)
{
	platform_driver_unregister(&omap_hdq_driver);
}
module_exit(omap_hdq_exit);
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");
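/*
 * Usage note (illustrative, assuming the module is built as omap_hdq.ko):
 * the reported slave id can be overridden at load time, e.g.
 *
 *	modprobe omap_hdq w1_id=2
 *
 * and read back by root via /sys/module/omap_hdq/parameters/w1_id (S_IRUSR).
 */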