Adding support for MOXA ART SoC. Testing port of linux-2.6.32.60-moxart.
[linux-3.6.7-moxart.git] / drivers / w1 / masters / omap_hdq.c
blob4b0fcf3c2d035f8aba0d0e0ba938f34df6621ef2
1 /*
2 * drivers/w1/masters/omap_hdq.c
4 * Copyright (C) 2007,2012 Texas Instruments, Inc.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/io.h>
18 #include <linux/sched.h>
19 #include <linux/pm_runtime.h>
21 #include <asm/irq.h>
22 #include <mach/hardware.h>
24 #include "../w1.h"
25 #include "../w1_int.h"
#define MOD_NAME "OMAP_HDQ:"

/* Register offsets within the HDQ module's MMIO window */
#define OMAP_HDQ_REVISION 0x00
#define OMAP_HDQ_TX_DATA 0x04
#define OMAP_HDQ_RX_DATA 0x08
#define OMAP_HDQ_CTRL_STATUS 0x0c
/* CTRL_STATUS register bits */
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
#define OMAP_HDQ_INT_STATUS 0x10
/* INT_STATUS bits; the ISR caches this register in hdq_data->hdq_irqstatus */
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
#define OMAP_HDQ_SYSCONFIG 0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
#define OMAP_HDQ_SYSSTATUS 0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)

/* Arguments to hdq_wait_for_flag(): wait for bits to clear vs. to be set */
#define OMAP_HDQ_FLAG_CLEAR 0
#define OMAP_HDQ_FLAG_SET 1
/* 1/5 second deadline for both register polling and interrupt waits */
#define OMAP_HDQ_TIMEOUT (HZ/5)

#define OMAP_HDQ_MAX_USER 4

/* Waiters blocked in hdq_write_byte()/hdq_read_byte(); woken by hdq_isr() */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* Module parameter: slave id reported by omap_w1_search_bus() (0 means 1) */
static int w1_id;
58 struct hdq_data {
59 struct device *dev;
60 void __iomem *hdq_base;
61 /* lock status update */
62 struct mutex hdq_mutex;
63 int hdq_usecount;
64 u8 hdq_irqstatus;
65 /* device lock */
66 spinlock_t hdq_spinlock;
68 * Used to control the call to omap_hdq_get and omap_hdq_put.
69 * HDQ Protocol: Write the CMD|REG_address first, followed by
70 * the data wrire or read.
72 int init_trans;
75 static int __devinit omap_hdq_probe(struct platform_device *pdev);
76 static int omap_hdq_remove(struct platform_device *pdev);
78 static struct platform_driver omap_hdq_driver = {
79 .probe = omap_hdq_probe,
80 .remove = omap_hdq_remove,
81 .driver = {
82 .name = "omap_hdq",
86 static u8 omap_w1_read_byte(void *_hdq);
87 static void omap_w1_write_byte(void *_hdq, u8 byte);
88 static u8 omap_w1_reset_bus(void *_hdq);
89 static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
90 u8 search_type, w1_slave_found_callback slave_found);
93 static struct w1_bus_master omap_w1_master = {
94 .read_byte = omap_w1_read_byte,
95 .write_byte = omap_w1_write_byte,
96 .reset_bus = omap_w1_reset_bus,
97 .search = omap_w1_search_bus,
100 /* HDQ register I/O routines */
101 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
103 return __raw_readl(hdq_data->hdq_base + offset);
106 static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
108 __raw_writel(val, hdq_data->hdq_base + offset);
111 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
112 u8 val, u8 mask)
114 u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
115 | (val & mask);
116 __raw_writel(new_val, hdq_data->hdq_base + offset);
118 return new_val;
122 * Wait for one or more bits in flag change.
123 * HDQ_FLAG_SET: wait until any bit in the flag is set.
124 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
125 * return 0 on success and -ETIMEDOUT in the case of timeout.
127 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
128 u8 flag, u8 flag_set, u8 *status)
130 int ret = 0;
131 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
133 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
134 /* wait for the flag clear */
135 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
136 && time_before(jiffies, timeout)) {
137 schedule_timeout_uninterruptible(1);
139 if (*status & flag)
140 ret = -ETIMEDOUT;
141 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
142 /* wait for the flag set */
143 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
144 && time_before(jiffies, timeout)) {
145 schedule_timeout_uninterruptible(1);
147 if (!(*status & flag))
148 ret = -ETIMEDOUT;
149 } else
150 return -EINVAL;
152 return ret;
155 /* write out a byte and fill *status with HDQ_INT_STATUS */
156 static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
158 int ret;
159 u8 tmp_status;
160 unsigned long irqflags;
162 *status = 0;
164 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
165 /* clear interrupt flags via a dummy read */
166 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
167 /* ISR loads it with new INT_STATUS */
168 hdq_data->hdq_irqstatus = 0;
169 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
171 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
173 /* set the GO bit */
174 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
175 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
176 /* wait for the TXCOMPLETE bit */
177 ret = wait_event_timeout(hdq_wait_queue,
178 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
179 if (ret == 0) {
180 dev_dbg(hdq_data->dev, "TX wait elapsed\n");
181 ret = -ETIMEDOUT;
182 goto out;
185 *status = hdq_data->hdq_irqstatus;
186 /* check irqstatus */
187 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
188 dev_dbg(hdq_data->dev, "timeout waiting for"
189 " TXCOMPLETE/RXCOMPLETE, %x", *status);
190 ret = -ETIMEDOUT;
191 goto out;
194 /* wait for the GO bit return to zero */
195 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
196 OMAP_HDQ_CTRL_STATUS_GO,
197 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
198 if (ret) {
199 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
200 " return to zero, %x", tmp_status);
203 out:
204 return ret;
207 /* HDQ Interrupt service routine */
208 static irqreturn_t hdq_isr(int irq, void *_hdq)
210 struct hdq_data *hdq_data = _hdq;
211 unsigned long irqflags;
213 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
214 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
215 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
216 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
218 if (hdq_data->hdq_irqstatus &
219 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
220 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
221 /* wake up sleeping process */
222 wake_up(&hdq_wait_queue);
225 return IRQ_HANDLED;
228 /* HDQ Mode: always return success */
229 static u8 omap_w1_reset_bus(void *_hdq)
231 return 0;
234 /* W1 search callback function */
235 static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
236 u8 search_type, w1_slave_found_callback slave_found)
238 u64 module_id, rn_le, cs, id;
240 if (w1_id)
241 module_id = w1_id;
242 else
243 module_id = 0x1;
245 rn_le = cpu_to_le64(module_id);
247 * HDQ might not obey truly the 1-wire spec.
248 * So calculate CRC based on module parameter.
250 cs = w1_calc_crc8((u8 *)&rn_le, 7);
251 id = (cs << 56) | module_id;
253 slave_found(master_dev, id);
256 static int _omap_hdq_reset(struct hdq_data *hdq_data)
258 int ret;
259 u8 tmp_status;
261 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
263 * Select HDQ mode & enable clocks.
264 * It is observed that INT flags can't be cleared via a read and GO/INIT
265 * won't return to zero if interrupt is disabled. So we always enable
266 * interrupt.
268 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
269 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
270 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
272 /* wait for reset to complete */
273 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
274 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
275 if (ret)
276 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
277 tmp_status);
278 else {
279 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
280 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
281 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
282 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
283 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
286 return ret;
289 /* Issue break pulse to the device */
290 static int omap_hdq_break(struct hdq_data *hdq_data)
292 int ret = 0;
293 u8 tmp_status;
294 unsigned long irqflags;
296 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
297 if (ret < 0) {
298 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
299 ret = -EINTR;
300 goto rtn;
303 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
304 /* clear interrupt flags via a dummy read */
305 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
306 /* ISR loads it with new INT_STATUS */
307 hdq_data->hdq_irqstatus = 0;
308 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
310 /* set the INIT and GO bit */
311 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
312 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
313 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
314 OMAP_HDQ_CTRL_STATUS_GO);
316 /* wait for the TIMEOUT bit */
317 ret = wait_event_timeout(hdq_wait_queue,
318 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
319 if (ret == 0) {
320 dev_dbg(hdq_data->dev, "break wait elapsed\n");
321 ret = -EINTR;
322 goto out;
325 tmp_status = hdq_data->hdq_irqstatus;
326 /* check irqstatus */
327 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
328 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
329 tmp_status);
330 ret = -ETIMEDOUT;
331 goto out;
334 * wait for both INIT and GO bits rerurn to zero.
335 * zero wait time expected for interrupt mode.
337 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
338 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
339 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
340 &tmp_status);
341 if (ret)
342 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
343 " return to zero, %x", tmp_status);
345 out:
346 mutex_unlock(&hdq_data->hdq_mutex);
347 rtn:
348 return ret;
351 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
353 int ret = 0;
354 u8 status;
356 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
357 if (ret < 0) {
358 ret = -EINTR;
359 goto rtn;
362 if (!hdq_data->hdq_usecount) {
363 ret = -EINVAL;
364 goto out;
367 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
368 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
369 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
370 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
372 * The RX comes immediately after TX.
374 wait_event_timeout(hdq_wait_queue,
375 (hdq_data->hdq_irqstatus
376 & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
377 OMAP_HDQ_TIMEOUT);
379 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
380 OMAP_HDQ_CTRL_STATUS_DIR);
381 status = hdq_data->hdq_irqstatus;
382 /* check irqstatus */
383 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
384 dev_dbg(hdq_data->dev, "timeout waiting for"
385 " RXCOMPLETE, %x", status);
386 ret = -ETIMEDOUT;
387 goto out;
390 /* the data is ready. Read it in! */
391 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
392 out:
393 mutex_unlock(&hdq_data->hdq_mutex);
394 rtn:
395 return ret;
399 /* Enable clocks and set the controller to HDQ mode */
400 static int omap_hdq_get(struct hdq_data *hdq_data)
402 int ret = 0;
404 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
405 if (ret < 0) {
406 ret = -EINTR;
407 goto rtn;
410 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
411 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
412 ret = -EINVAL;
413 goto out;
414 } else {
415 hdq_data->hdq_usecount++;
416 try_module_get(THIS_MODULE);
417 if (1 == hdq_data->hdq_usecount) {
419 pm_runtime_get_sync(hdq_data->dev);
421 /* make sure HDQ is out of reset */
422 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
423 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
424 ret = _omap_hdq_reset(hdq_data);
425 if (ret)
426 /* back up the count */
427 hdq_data->hdq_usecount--;
428 } else {
429 /* select HDQ mode & enable clocks */
430 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
431 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
432 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
433 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
434 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
435 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
440 out:
441 mutex_unlock(&hdq_data->hdq_mutex);
442 rtn:
443 return ret;
446 /* Disable clocks to the module */
447 static int omap_hdq_put(struct hdq_data *hdq_data)
449 int ret = 0;
451 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
452 if (ret < 0)
453 return -EINTR;
455 if (0 == hdq_data->hdq_usecount) {
456 dev_dbg(hdq_data->dev, "attempt to decrement use count"
457 " when it is zero");
458 ret = -EINVAL;
459 } else {
460 hdq_data->hdq_usecount--;
461 module_put(THIS_MODULE);
462 if (0 == hdq_data->hdq_usecount)
463 pm_runtime_put_sync(hdq_data->dev);
465 mutex_unlock(&hdq_data->hdq_mutex);
467 return ret;
470 /* Read a byte of data from the device */
471 static u8 omap_w1_read_byte(void *_hdq)
473 struct hdq_data *hdq_data = _hdq;
474 u8 val = 0;
475 int ret;
477 ret = hdq_read_byte(hdq_data, &val);
478 if (ret) {
479 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
480 if (ret < 0) {
481 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
482 return -EINTR;
484 hdq_data->init_trans = 0;
485 mutex_unlock(&hdq_data->hdq_mutex);
486 omap_hdq_put(hdq_data);
487 return -1;
490 /* Write followed by a read, release the module */
491 if (hdq_data->init_trans) {
492 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
493 if (ret < 0) {
494 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
495 return -EINTR;
497 hdq_data->init_trans = 0;
498 mutex_unlock(&hdq_data->hdq_mutex);
499 omap_hdq_put(hdq_data);
502 return val;
505 /* Write a byte of data to the device */
506 static void omap_w1_write_byte(void *_hdq, u8 byte)
508 struct hdq_data *hdq_data = _hdq;
509 int ret;
510 u8 status;
512 /* First write to initialize the transfer */
513 if (hdq_data->init_trans == 0)
514 omap_hdq_get(hdq_data);
516 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
517 if (ret < 0) {
518 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
519 return;
521 hdq_data->init_trans++;
522 mutex_unlock(&hdq_data->hdq_mutex);
524 ret = hdq_write_byte(hdq_data, byte, &status);
525 if (ret < 0) {
526 dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
527 return;
530 /* Second write, data transferred. Release the module */
531 if (hdq_data->init_trans > 1) {
532 omap_hdq_put(hdq_data);
533 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
534 if (ret < 0) {
535 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
536 return;
538 hdq_data->init_trans = 0;
539 mutex_unlock(&hdq_data->hdq_mutex);
542 return;
545 static int __devinit omap_hdq_probe(struct platform_device *pdev)
547 struct hdq_data *hdq_data;
548 struct resource *res;
549 int ret, irq;
550 u8 rev;
552 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
553 if (!hdq_data) {
554 dev_dbg(&pdev->dev, "unable to allocate memory\n");
555 ret = -ENOMEM;
556 goto err_kmalloc;
559 hdq_data->dev = &pdev->dev;
560 platform_set_drvdata(pdev, hdq_data);
562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
563 if (!res) {
564 dev_dbg(&pdev->dev, "unable to get resource\n");
565 ret = -ENXIO;
566 goto err_resource;
569 hdq_data->hdq_base = ioremap(res->start, SZ_4K);
570 if (!hdq_data->hdq_base) {
571 dev_dbg(&pdev->dev, "ioremap failed\n");
572 ret = -EINVAL;
573 goto err_ioremap;
576 hdq_data->hdq_usecount = 0;
577 mutex_init(&hdq_data->hdq_mutex);
579 pm_runtime_enable(&pdev->dev);
580 pm_runtime_get_sync(&pdev->dev);
582 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
583 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
584 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
586 spin_lock_init(&hdq_data->hdq_spinlock);
588 irq = platform_get_irq(pdev, 0);
589 if (irq < 0) {
590 ret = -ENXIO;
591 goto err_irq;
594 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
595 if (ret < 0) {
596 dev_dbg(&pdev->dev, "could not request irq\n");
597 goto err_irq;
600 omap_hdq_break(hdq_data);
602 pm_runtime_put_sync(&pdev->dev);
604 omap_w1_master.data = hdq_data;
606 ret = w1_add_master_device(&omap_w1_master);
607 if (ret) {
608 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
609 goto err_w1;
612 return 0;
614 err_irq:
615 pm_runtime_put_sync(&pdev->dev);
616 err_w1:
617 pm_runtime_disable(&pdev->dev);
619 iounmap(hdq_data->hdq_base);
621 err_ioremap:
622 err_resource:
623 platform_set_drvdata(pdev, NULL);
624 kfree(hdq_data);
626 err_kmalloc:
627 return ret;
631 static int omap_hdq_remove(struct platform_device *pdev)
633 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
635 mutex_lock(&hdq_data->hdq_mutex);
637 if (hdq_data->hdq_usecount) {
638 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
639 mutex_unlock(&hdq_data->hdq_mutex);
640 return -EBUSY;
643 mutex_unlock(&hdq_data->hdq_mutex);
645 /* remove module dependency */
646 pm_runtime_disable(&pdev->dev);
647 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
648 platform_set_drvdata(pdev, NULL);
649 iounmap(hdq_data->hdq_base);
650 kfree(hdq_data);
652 return 0;
655 static int __init
656 omap_hdq_init(void)
658 return platform_driver_register(&omap_hdq_driver);
660 module_init(omap_hdq_init);
662 static void __exit
663 omap_hdq_exit(void)
665 platform_driver_unregister(&omap_hdq_driver);
667 module_exit(omap_hdq_exit);
/* Readable by root only; sets the synthetic slave id used by the search op */
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");