// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mfd/ucb1x00-core.c
 *
 *  Copyright (C) 2001 Russell King, All Rights Reserved.
 *
 *  The UCB1x00 core driver provides basic services for handling IO,
 *  the ADC, interrupts, and accessing registers.  It is designed
 *  such that everything goes through this layer, thereby providing
 *  a consistent locking methodology, as well as allowing the drivers
 *  to be used on other non-MCP-enabled hardware platforms.
 *
 *  Note that all locks are private to this file.  Nothing else may
 *  use them.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
#include <linux/gpio/driver.h>

static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);

/**
 *	ucb1x00_io_set_dir - set IO direction
 *	@ucb: UCB1x00 structure describing chip
 *	@in:  bitfield of IO pins to be set as inputs
 *	@out: bitfield of IO pins to be set as outputs
 *
 *	Set the IO direction of the ten general purpose IO pins on
 *	the UCB1x00 chip.  The @in bitfield has priority over the
 *	@out bitfield, in that if you specify a pin as both input
 *	and output, it will end up as an input.
 *
 *	ucb1x00_enable must have been called to enable the comms
 *	before using this function.
 *
 *	This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir |= out;
	ucb->io_dir &= ~in;

	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 *	ucb1x00_io_write - set or clear IO outputs
 *	@ucb:   UCB1x00 structure describing chip
 *	@set:   bitfield of IO pins to set to logic '1'
 *	@clear: bitfield of IO pins to set to logic '0'
 *
 *	Set the IO output state of the specified IO pins.  The value
 *	is retained if the pins are subsequently configured as inputs.
 *	The @clear bitfield has priority over the @set bitfield -
 *	outputs will be cleared.
 *
 *	ucb1x00_enable must have been called to enable the comms
 *	before using this function.
 *
 *	This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_out |= set;
	ucb->io_out &= ~clear;

	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 *	ucb1x00_io_read - read the current state of the IO pins
 *	@ucb: UCB1x00 structure describing chip
 *
 *	Return a bitfield describing the logic state of the ten
 *	general purpose IO pins.
 *
 *	ucb1x00_enable must have been called to enable the comms
 *	before using this function.
 *
 *	This function does not take any mutexes or spinlocks.
 */
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
	return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}
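
/*
 * Illustrative sketch (not part of this driver): a client driver that has
 * obtained a struct ucb1x00 (for example via the struct ucb1x00_dev passed
 * to its add callback) would bracket the IO accessors above with the
 * enable/disable pair.  The function name and the choice of pin 0 below
 * are hypothetical.
 *
 *	static void example_set_pin0(struct ucb1x00_dev *dev, int on)
 *	{
 *		struct ucb1x00 *ucb = dev->ucb;
 *
 *		ucb1x00_enable(ucb);				// start comms to the chip
 *		ucb1x00_io_set_dir(ucb, 0, 1 << 0);		// pin 0 as output
 *		if (on)
 *			ucb1x00_io_write(ucb, 1 << 0, 0);	// drive pin 0 high
 *		else
 *			ucb1x00_io_write(ucb, 0, 1 << 0);	// drive pin 0 low
 *		ucb1x00_disable(ucb);
 *	}
 */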

static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	if (value)
		ucb->io_out |= 1 << offset;
	else
		ucb->io_out &= ~(1 << offset);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned val;

	ucb1x00_enable(ucb);
	val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
	ucb1x00_disable(ucb);

	return !!(val & (1 << offset));
}

static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir &= ~(1 << offset);
	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
		int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;
	unsigned old, mask = 1 << offset;

	spin_lock_irqsave(&ucb->io_lock, flags);
	old = ucb->io_out;
	if (value)
		ucb->io_out |= mask;
	else
		ucb->io_out &= ~mask;

	ucb1x00_enable(ucb);
	if (old != ucb->io_out)
		ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);

	if (!(ucb->io_dir & mask)) {
		ucb->io_dir |= mask;
		ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	}
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);

	return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}

/*
 * UCB1300 data sheet says we must:
 *  1. enable ADC	=> 5us (including reference startup time)
 *  2. select input	=> 51*tsibclk  => 4.3us
 *  3. start conversion	=> 102*tsibclk => 8.5us
 * (tsibclk = 1/11981000)
 * Period between SIB 128-bit frames = 10.7us
 */
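
/*
 * For reference, the timings above follow directly from tsibclk:
 *   51 / 11981000  ~= 4.26us  (input select)
 *   102 / 11981000 ~= 8.51us  (conversion start)
 *   128 / 11981000 ~= 10.68us (one 128-bit SIB frame)
 * so an unsynchronised conversion is bounded by roughly two SIB frames,
 * matching the "2 frames max without sync" note on ucb1x00_adc_read below.
 */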

/**
 *	ucb1x00_adc_enable - enable the ADC converter
 *	@ucb: UCB1x00 structure describing chip
 *
 *	Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 *	Any code wishing to use the ADC converter must call this
 *	function prior to using it.
 *
 *	This function takes the ADC mutex to prevent two or more
 *	concurrent uses, and therefore may sleep.  As a result, it
 *	can only be called from process context, not interrupt
 *	context.
 *
 *	You should release the ADC as soon as possible using
 *	ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
	mutex_lock(&ucb->adc_mutex);

	ucb->adc_cr |= UCB_ADC_ENA;

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}

/**
 *	ucb1x00_adc_read - read the specified ADC channel
 *	@ucb: UCB1x00 structure describing chip
 *	@adc_channel: ADC channel mask
 *	@sync: wait for synchronisation pulse.
 *
 *	Start an ADC conversion and wait for the result.  Note that
 *	synchronised ADC conversions (via the ADCSYNC pin) must wait
 *	until the trigger is asserted and the conversion is finished.
 *
 *	This function currently spins waiting for the conversion to
 *	complete (2 frames max without sync).
 *
 *	If called for a synchronised ADC conversion, it may sleep
 *	with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
	unsigned int val;

	if (sync)
		adc_channel |= UCB_ADC_SYNC_ENA;

	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

	for (;;) {
		val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
		if (val & UCB_ADC_DAT_VAL)
			break;
		/* yield to other processes */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return UCB_ADC_DAT(val);
}

/**
 *	ucb1x00_adc_disable - disable the ADC converter
 *	@ucb: UCB1x00 structure describing chip
 *
 *	Disable the ADC converter and release the ADC mutex.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
	ucb->adc_cr &= ~UCB_ADC_ENA;
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
	ucb1x00_disable(ucb);

	mutex_unlock(&ucb->adc_mutex);
}
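
/*
 * Illustrative sketch (not part of this driver): a typical ADC user, such
 * as a touchscreen or battery-monitor sub-driver, brackets its reads with
 * the enable/disable pair above.  The function name is hypothetical; the
 * channel mask would be one of the UCB_ADC_INP_* selectors from
 * <linux/mfd/ucb1x00.h>, and 0 requests an unsynchronised conversion.
 *
 *	static unsigned int example_read_channel(struct ucb1x00 *ucb,
 *						 int channel)
 *	{
 *		unsigned int val;
 *
 *		ucb1x00_adc_enable(ucb);	// takes adc_mutex, may sleep
 *		val = ucb1x00_adc_read(ucb, channel, 0);
 *		ucb1x00_adc_disable(ucb);	// releases adc_mutex
 *
 *		return val;
 *	}
 */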

/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip.  We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
	unsigned int isr, i;

	ucb1x00_enable(ucb);
	isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	for (i = 0; i < 16 && isr; i++, isr >>= 1)
		if (isr & 1)
			generic_handle_irq(ucb->irq_base + i);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
	ucb1x00_enable(ucb);
	if (ucb->irq_ris_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
	if (ucb->irq_fal_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_noop(struct irq_data *data)
{
}

static void ucb1x00_irq_mask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask &= ~mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static void ucb1x00_irq_unmask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask |= mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	if (type & IRQ_TYPE_EDGE_RISING)
		ucb->irq_ris_enbl |= mask;
	else
		ucb->irq_ris_enbl &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		ucb->irq_fal_enbl |= mask;
	else
		ucb->irq_fal_enbl &= ~mask;
	if (ucb->irq_mask & mask) {
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	}
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	if (!pdata || !pdata->can_wakeup)
		return -EINVAL;

	raw_spin_lock(&ucb->irq_lock);
	if (on)
		ucb->irq_wake |= mask;
	else
		ucb->irq_wake &= ~mask;
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static struct irq_chip ucb1x00_irqchip = {
	.name		= "ucb1x00",
	.irq_ack	= ucb1x00_irq_noop,
	.irq_mask	= ucb1x00_irq_mask,
	.irq_unmask	= ucb1x00_irq_unmask,
	.irq_set_type	= ucb1x00_irq_set_type,
	.irq_set_wake	= ucb1x00_irq_set_wake,
};

static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
	struct ucb1x00_dev *dev;
	int ret;

	dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ucb = ucb;
	dev->drv = drv;

	ret = drv->add(dev);
	if (ret) {
		kfree(dev);
		return ret;
	}

	list_add_tail(&dev->dev_node, &ucb->devs);
	list_add_tail(&dev->drv_node, &drv->devs);

	return ret;
}

static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
	dev->drv->remove(dev);
	list_del(&dev->dev_node);
	list_del(&dev->drv_node);
	kfree(dev);
}

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.  For reference, the expected
 * IRQ mappings are:
 *
 *	Machine		Default IRQ
 *	adsbitsy	IRQ_GPCIN4
 *	cerf		IRQ_GPIO_UCB1200_IRQ
 *	flexanet	IRQ_GPIO_GUI
 *	freebird	IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 *	graphicsclient	ADS_EXT_IRQ(8)
 *	graphicsmaster	ADS_EXT_IRQ(8)
 *	lart		LART_IRQ_UCB1200
 *	omnimeter	IRQ_GPIO23
 *	pfs168		IRQ_GPIO_UCB1300_IRQ
 *	simpad		IRQ_GPIO_UCB1300_IRQ
 *	shannon		SHANNON_IRQ_GPIO_IRQ_CODEC
 *	yopy		IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
	unsigned long mask;

	mask = probe_irq_on();

	/*
	 * Enable the ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Cause an ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/*
	 * Wait for the conversion to complete.
	 */
	while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

	/*
	 * Disable and clear interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Read triggered interrupt.
	 */
	return probe_irq_off(mask);
}

static void ucb1x00_release(struct device *dev)
{
	struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);

	kfree(ucb);
}

static struct class ucb1x00_class = {
	.name		= "ucb1x00",
	.dev_release	= ucb1x00_release,
};

static int ucb1x00_probe(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00_driver *drv;
	struct ucb1x00 *ucb;
	unsigned id, i, irq_base;
	int ret = -ENODEV;

	/* Tell the platform to deassert the UCB1x00 reset */
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE);

	mcp_enable(mcp);
	id = mcp_reg_read(mcp, UCB_ID);
	mcp_disable(mcp);

	if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
		printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
		goto out;
	}

	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
	ret = -ENOMEM;
	if (!ucb)
		goto out;

	device_initialize(&ucb->dev);
	ucb->dev.class = &ucb1x00_class;
	ucb->dev.parent = &mcp->attached_device;
	dev_set_name(&ucb->dev, "ucb1x00");

	raw_spin_lock_init(&ucb->irq_lock);
	spin_lock_init(&ucb->io_lock);
	mutex_init(&ucb->adc_mutex);

	ucb->id = id;
	ucb->mcp = mcp;

	ret = device_add(&ucb->dev);
	if (ret)
		goto err_dev_add;

	ucb1x00_enable(ucb);
	ucb->irq = ucb1x00_detect_irq(ucb);
	ucb1x00_disable(ucb);
	if (!ucb->irq) {
		dev_err(&ucb->dev, "IRQ probe failed\n");
		ret = -ENODEV;
		goto err_no_irq;
	}

	ucb->gpio.base = -1;
	irq_base = pdata ? pdata->irq_base : 0;
	ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
	if (ucb->irq_base < 0) {
		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
			ucb->irq_base);
		ret = ucb->irq_base;
		goto err_irq_alloc;
	}

	for (i = 0; i < 16; i++) {
		unsigned irq = ucb->irq_base + i;

		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
		irq_set_chip_data(irq, ucb);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);

	if (pdata && pdata->gpio_base) {
		ucb->gpio.label = dev_name(&ucb->dev);
		ucb->gpio.parent = &ucb->dev;
		ucb->gpio.owner = THIS_MODULE;
		ucb->gpio.base = pdata->gpio_base;
		ucb->gpio.ngpio = 10;
		ucb->gpio.set = ucb1x00_gpio_set;
		ucb->gpio.get = ucb1x00_gpio_get;
		ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
		ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
		ucb->gpio.to_irq = ucb1x00_to_irq;
		ret = gpiochip_add_data(&ucb->gpio, ucb);
		if (ret)
			goto err_gpio_add;
	} else
		dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");

	mcp_set_drvdata(mcp, ucb);

	if (pdata)
		device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);

	INIT_LIST_HEAD(&ucb->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&ucb->node, &ucb1x00_devices);
	list_for_each_entry(drv, &ucb1x00_drivers, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);

	return ret;

 err_gpio_add:
	irq_set_chained_handler(ucb->irq, NULL);
 err_irq_alloc:
	if (ucb->irq_base > 0)
		irq_free_descs(ucb->irq_base, 16);
 err_no_irq:
	device_del(&ucb->dev);
 err_dev_add:
	put_device(&ucb->dev);
 out:
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE_FAIL);

	return ret;
}

static void ucb1x00_remove(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
	struct list_head *l, *n;

	mutex_lock(&ucb1x00_mutex);
	list_del(&ucb->node);
	list_for_each_safe(l, n, &ucb->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->gpio.base != -1)
		gpiochip_remove(&ucb->gpio);

	irq_set_chained_handler(ucb->irq, NULL);
	irq_free_descs(ucb->irq_base, 16);
	device_unregister(&ucb->dev);

	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_REMOVE);
}

int ucb1x00_register_driver(struct ucb1x00_driver *drv)
{
	struct ucb1x00 *ucb;

	INIT_LIST_HEAD(&drv->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&drv->node, &ucb1x00_drivers);
	list_for_each_entry(ucb, &ucb1x00_devices, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);

	return 0;
}

void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
	struct list_head *n, *l;

	mutex_lock(&ucb1x00_mutex);
	list_del(&drv->node);
	list_for_each_safe(l, n, &drv->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);
}
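
/*
 * Illustrative sketch (not part of this driver): a UCB1x00 function driver
 * hooks itself up through ucb1x00_register_driver(); the core then calls
 * its ->add() callback for every UCB1x00 chip already probed and for any
 * that appear later.  All names below are hypothetical.
 *
 *	static int example_add(struct ucb1x00_dev *dev)
 *	{
 *		// dev->ucb gives access to the IO/ADC/IRQ services above
 *		return 0;
 *	}
 *
 *	static void example_remove(struct ucb1x00_dev *dev)
 *	{
 *	}
 *
 *	static struct ucb1x00_driver example_driver = {
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 *	// module init:  ucb1x00_register_driver(&example_driver);
 *	// module exit:  ucb1x00_unregister_driver(&example_driver);
 */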

#ifdef CONFIG_PM_SLEEP
static int ucb1x00_suspend(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->suspend)
			udev->drv->suspend(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_enable(ucb);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_wake);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_wake);
		ucb1x00_disable(ucb);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		enable_irq_wake(ucb->irq);
	} else if (pdata && pdata->reset)
		pdata->reset(UCB_RST_SUSPEND);

	return 0;
}

static int ucb1x00_resume(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	if (!ucb->irq_wake && pdata && pdata->reset)
		pdata->reset(UCB_RST_RESUME);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		disable_irq_wake(ucb->irq);
	}
	ucb1x00_disable(ucb);

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->resume)
			udev->drv->resume(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops, ucb1x00_suspend, ucb1x00_resume);

static struct mcp_driver ucb1x00_driver = {
	.drv		= {
		.name	= "ucb1x00",
		.owner	= THIS_MODULE,
		.pm	= &ucb1x00_pm_ops,
	},
	.probe		= ucb1x00_probe,
	.remove		= ucb1x00_remove,
};

static int __init ucb1x00_init(void)
{
	int ret = class_register(&ucb1x00_class);
	if (ret == 0) {
		ret = mcp_driver_register(&ucb1x00_driver);
		if (ret)
			class_unregister(&ucb1x00_class);
	}
	return ret;
}

static void __exit ucb1x00_exit(void)
{
	mcp_driver_unregister(&ucb1x00_driver);
	class_unregister(&ucb1x00_class);
}

module_init(ucb1x00_init);
module_exit(ucb1x00_exit);

EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);

EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);

EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);

MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");