/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8

struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;
	struct mutex io_mutex;

	/* IRQ */
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	struct mutex adc_mutex;
};

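/*
 * IO: every register access is a single full-duplex 32-bit SPI transfer.
 * The operation bit, register address and value are packed into pcap->buf,
 * which also receives the data clocked back from the chip.
 */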
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof t);
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	mutex_lock(&pcap->io_mutex);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

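/*
 * IRQ: the PCAP interrupt lines are exposed as a linear range of virtual
 * irqs starting at pdata->irq_base.  Mask/unmask only update the cached
 * MSR; the actual (sleeping) SPI write is deferred to the pcapd workqueue.
 */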
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name		= "pcap",
	.irq_disable	= pcap_mask_irq,
	.irq_mask	= pcap_mask_irq,
	.irq_unmask	= pcap_unmask_irq,
};

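/*
 * msr_work/isr_work run from the single-threaded pcapd workqueue so that
 * the sleeping SPI register accesses never happen in interrupt context.
 */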
static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack irqs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		local_irq_disable();
		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1)
				generic_handle_irq(irq);
		}
		local_irq_enable();
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(pdata->gpio));
}

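/*
 * Chained handler for the GPIO interrupt coming from the PCAP: it runs in
 * hard-irq context, so it only acks the parent irq and defers the register
 * I/O and demultiplexing to isr_work.
 */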
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pcap_chip *pcap = irq_get_handler_data(irq);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
}

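/*
 * ADC: conversion requests are kept in a small ring buffer (adc_queue);
 * adc_head points at the request currently being converted, adc_tail at
 * the next free slot.  Results are delivered via the ADCDONE interrupt.
 */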
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN | PCAP_ADC_BATT_I_ADC | PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	u32 tmp;
	u8 head;

	mutex_lock(&pcap->adc_mutex);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		mutex_unlock(&pcap->adc_mutex);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

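/*
 * ADCDONE is one of the virtual PCAP irqs, so this handler is invoked via
 * generic_handle_irq() from isr_work, i.e. in process context; taking
 * adc_mutex and using the sleeping register helpers here is fine.
 */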
static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, "adc irq without pending request\n")) {
		mutex_unlock(&pcap->adc_mutex);
		return IRQ_HANDLED;
	}

	/* read requested channels results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

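/*
 * Queue an asynchronous conversion.  The request is allocated here and
 * freed by pcap_adc_irq() once the callback has run; -EBUSY is returned
 * when the ring is full.
 */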
int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
						void *callback, void *data)
{
	struct pcap_adc_request *req;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	mutex_lock(&pcap->adc_mutex);
	if (pcap->adc_queue[pcap->adc_tail]) {
		mutex_unlock(&pcap->adc_mutex);
		kfree(req);
		return -EBUSY;
	}

	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
								u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
								&sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);

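/*
 * Subdevices listed in the platform data are registered as child platform
 * devices of the SPI device; device_for_each_child() with
 * pcap_remove_subdev() tears them down again.
 */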
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int pcap_add_subdev(struct pcap_chip *pcap,
						struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static int ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = spi_get_drvdata(spi);
	int i;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	mutex_lock(&pcap->adc_mutex);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	mutex_unlock(&pcap->adc_mutex);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);

	return 0;
}

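/*
 * Probe: configure the SPI link for 32-bit words, set up the virtual irq
 * chip and the chained GPIO handler, request the ADCDONE interrupt and
 * register the subdevices described in the platform data.
 */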
static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		irq_set_chip_data(i, pcap);
#ifdef CONFIG_ARM
		set_irq_flags(i, IRQF_VALID);
#else
		irq_set_noprobe(i);
#endif
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_handler_data(spi->irq, pcap);
	irq_set_chained_handler(spi->irq, pcap_irq_handler);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
				pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove	= ezx_pcap_remove,
	.driver	= {
		.name	= "ezx-pcap",
		.owner	= THIS_MODULE,
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");