// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/platform_device.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/mfd/ezx-pcap.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
17 #include <linux/slab.h>
19 #define PCAP_ADC_MAXQ 8
20 struct pcap_adc_request
{
24 void (*callback
)(void *, u16
[]);
28 struct pcap_adc_sync_request
{
30 struct completion completion
;
34 struct spi_device
*spi
;
38 struct mutex io_mutex
;
41 unsigned int irq_base
;
43 struct work_struct isr_work
;
44 struct work_struct msr_work
;
45 struct workqueue_struct
*workqueue
;
48 struct pcap_adc_request
*adc_queue
[PCAP_ADC_MAXQ
];
51 struct mutex adc_mutex
;
55 static int ezx_pcap_putget(struct pcap_chip
*pcap
, u32
*data
)
57 struct spi_transfer t
;
61 memset(&t
, 0, sizeof(t
));
64 spi_message_add_tail(&t
, &m
);
67 t
.tx_buf
= (u8
*) &pcap
->buf
;
68 t
.rx_buf
= (u8
*) &pcap
->buf
;
69 status
= spi_sync(pcap
->spi
, &m
);
77 int ezx_pcap_write(struct pcap_chip
*pcap
, u8 reg_num
, u32 value
)
81 mutex_lock(&pcap
->io_mutex
);
82 value
&= PCAP_REGISTER_VALUE_MASK
;
83 value
|= PCAP_REGISTER_WRITE_OP_BIT
84 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
85 ret
= ezx_pcap_putget(pcap
, &value
);
86 mutex_unlock(&pcap
->io_mutex
);
90 EXPORT_SYMBOL_GPL(ezx_pcap_write
);
92 int ezx_pcap_read(struct pcap_chip
*pcap
, u8 reg_num
, u32
*value
)
96 mutex_lock(&pcap
->io_mutex
);
97 *value
= PCAP_REGISTER_READ_OP_BIT
98 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
100 ret
= ezx_pcap_putget(pcap
, value
);
101 mutex_unlock(&pcap
->io_mutex
);
105 EXPORT_SYMBOL_GPL(ezx_pcap_read
);
107 int ezx_pcap_set_bits(struct pcap_chip
*pcap
, u8 reg_num
, u32 mask
, u32 val
)
110 u32 tmp
= PCAP_REGISTER_READ_OP_BIT
|
111 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
113 mutex_lock(&pcap
->io_mutex
);
114 ret
= ezx_pcap_putget(pcap
, &tmp
);
118 tmp
&= (PCAP_REGISTER_VALUE_MASK
& ~mask
);
119 tmp
|= (val
& mask
) | PCAP_REGISTER_WRITE_OP_BIT
|
120 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
122 ret
= ezx_pcap_putget(pcap
, &tmp
);
124 mutex_unlock(&pcap
->io_mutex
);
128 EXPORT_SYMBOL_GPL(ezx_pcap_set_bits
);
131 int irq_to_pcap(struct pcap_chip
*pcap
, int irq
)
133 return irq
- pcap
->irq_base
;
135 EXPORT_SYMBOL_GPL(irq_to_pcap
);
137 int pcap_to_irq(struct pcap_chip
*pcap
, int irq
)
139 return pcap
->irq_base
+ irq
;
141 EXPORT_SYMBOL_GPL(pcap_to_irq
);
143 static void pcap_mask_irq(struct irq_data
*d
)
145 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
147 pcap
->msr
|= 1 << irq_to_pcap(pcap
, d
->irq
);
148 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
151 static void pcap_unmask_irq(struct irq_data
*d
)
153 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
155 pcap
->msr
&= ~(1 << irq_to_pcap(pcap
, d
->irq
));
156 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
159 static struct irq_chip pcap_irq_chip
= {
161 .irq_disable
= pcap_mask_irq
,
162 .irq_mask
= pcap_mask_irq
,
163 .irq_unmask
= pcap_unmask_irq
,
166 static void pcap_msr_work(struct work_struct
*work
)
168 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, msr_work
);
170 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
173 static void pcap_isr_work(struct work_struct
*work
)
175 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, isr_work
);
176 struct pcap_platform_data
*pdata
= dev_get_platdata(&pcap
->spi
->dev
);
177 u32 msr
, isr
, int_sel
, service
;
181 ezx_pcap_read(pcap
, PCAP_REG_MSR
, &msr
);
182 ezx_pcap_read(pcap
, PCAP_REG_ISR
, &isr
);
184 /* We can't service/ack irqs that are assigned to port 2 */
185 if (!(pdata
->config
& PCAP_SECOND_PORT
)) {
186 ezx_pcap_read(pcap
, PCAP_REG_INT_SEL
, &int_sel
);
190 ezx_pcap_write(pcap
, PCAP_REG_MSR
, isr
| msr
);
191 ezx_pcap_write(pcap
, PCAP_REG_ISR
, isr
);
194 service
= isr
& ~msr
;
195 for (irq
= pcap
->irq_base
; service
; service
>>= 1, irq
++) {
197 generic_handle_irq(irq
);
200 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
201 } while (gpio_get_value(pdata
->gpio
));
204 static void pcap_irq_handler(struct irq_desc
*desc
)
206 struct pcap_chip
*pcap
= irq_desc_get_handler_data(desc
);
208 desc
->irq_data
.chip
->irq_ack(&desc
->irq_data
);
209 queue_work(pcap
->workqueue
, &pcap
->isr_work
);
213 void pcap_set_ts_bits(struct pcap_chip
*pcap
, u32 bits
)
217 mutex_lock(&pcap
->adc_mutex
);
218 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
219 tmp
&= ~(PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
220 tmp
|= bits
& (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
221 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
222 mutex_unlock(&pcap
->adc_mutex
);
224 EXPORT_SYMBOL_GPL(pcap_set_ts_bits
);
226 static void pcap_disable_adc(struct pcap_chip
*pcap
)
230 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
231 tmp
&= ~(PCAP_ADC_ADEN
|PCAP_ADC_BATT_I_ADC
|PCAP_ADC_BATT_I_POLARITY
);
232 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
235 static void pcap_adc_trigger(struct pcap_chip
*pcap
)
240 mutex_lock(&pcap
->adc_mutex
);
241 head
= pcap
->adc_head
;
242 if (!pcap
->adc_queue
[head
]) {
243 /* queue is empty, save power */
244 pcap_disable_adc(pcap
);
245 mutex_unlock(&pcap
->adc_mutex
);
248 /* start conversion on requested bank, save TS_M bits */
249 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
250 tmp
&= (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
251 tmp
|= pcap
->adc_queue
[head
]->flags
| PCAP_ADC_ADEN
;
253 if (pcap
->adc_queue
[head
]->bank
== PCAP_ADC_BANK_1
)
254 tmp
|= PCAP_ADC_AD_SEL1
;
256 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
257 mutex_unlock(&pcap
->adc_mutex
);
258 ezx_pcap_write(pcap
, PCAP_REG_ADR
, PCAP_ADR_ASC
);
261 static irqreturn_t
pcap_adc_irq(int irq
, void *_pcap
)
263 struct pcap_chip
*pcap
= _pcap
;
264 struct pcap_adc_request
*req
;
268 mutex_lock(&pcap
->adc_mutex
);
269 req
= pcap
->adc_queue
[pcap
->adc_head
];
271 if (WARN(!req
, "adc irq without pending request\n")) {
272 mutex_unlock(&pcap
->adc_mutex
);
276 /* read requested channels results */
277 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
278 tmp
&= ~(PCAP_ADC_ADA1_MASK
| PCAP_ADC_ADA2_MASK
);
279 tmp
|= (req
->ch
[0] << PCAP_ADC_ADA1_SHIFT
);
280 tmp
|= (req
->ch
[1] << PCAP_ADC_ADA2_SHIFT
);
281 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
282 ezx_pcap_read(pcap
, PCAP_REG_ADR
, &tmp
);
283 res
[0] = (tmp
& PCAP_ADR_ADD1_MASK
) >> PCAP_ADR_ADD1_SHIFT
;
284 res
[1] = (tmp
& PCAP_ADR_ADD2_MASK
) >> PCAP_ADR_ADD2_SHIFT
;
286 pcap
->adc_queue
[pcap
->adc_head
] = NULL
;
287 pcap
->adc_head
= (pcap
->adc_head
+ 1) & (PCAP_ADC_MAXQ
- 1);
288 mutex_unlock(&pcap
->adc_mutex
);
290 /* pass the results and release memory */
291 req
->callback(req
->data
, res
);
294 /* trigger next conversion (if any) on queue */
295 pcap_adc_trigger(pcap
);
300 int pcap_adc_async(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
301 void *callback
, void *data
)
303 struct pcap_adc_request
*req
;
305 /* This will be freed after we have a result */
306 req
= kmalloc(sizeof(struct pcap_adc_request
), GFP_KERNEL
);
314 req
->callback
= callback
;
317 mutex_lock(&pcap
->adc_mutex
);
318 if (pcap
->adc_queue
[pcap
->adc_tail
]) {
319 mutex_unlock(&pcap
->adc_mutex
);
323 pcap
->adc_queue
[pcap
->adc_tail
] = req
;
324 pcap
->adc_tail
= (pcap
->adc_tail
+ 1) & (PCAP_ADC_MAXQ
- 1);
325 mutex_unlock(&pcap
->adc_mutex
);
327 /* start conversion */
328 pcap_adc_trigger(pcap
);
332 EXPORT_SYMBOL_GPL(pcap_adc_async
);
334 static void pcap_adc_sync_cb(void *param
, u16 res
[])
336 struct pcap_adc_sync_request
*req
= param
;
338 req
->res
[0] = res
[0];
339 req
->res
[1] = res
[1];
340 complete(&req
->completion
);
343 int pcap_adc_sync(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
346 struct pcap_adc_sync_request sync_data
;
349 init_completion(&sync_data
.completion
);
350 ret
= pcap_adc_async(pcap
, bank
, flags
, ch
, pcap_adc_sync_cb
,
354 wait_for_completion(&sync_data
.completion
);
355 res
[0] = sync_data
.res
[0];
356 res
[1] = sync_data
.res
[1];
360 EXPORT_SYMBOL_GPL(pcap_adc_sync
);
/* pcap_remove_subdev - device_for_each_child() helper: drop one subdev. */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
369 static int pcap_add_subdev(struct pcap_chip
*pcap
,
370 struct pcap_subdev
*subdev
)
372 struct platform_device
*pdev
;
375 pdev
= platform_device_alloc(subdev
->name
, subdev
->id
);
379 pdev
->dev
.parent
= &pcap
->spi
->dev
;
380 pdev
->dev
.platform_data
= subdev
->platform_data
;
382 ret
= platform_device_add(pdev
);
384 platform_device_put(pdev
);
389 static int ezx_pcap_remove(struct spi_device
*spi
)
391 struct pcap_chip
*pcap
= spi_get_drvdata(spi
);
394 /* remove all registered subdevs */
395 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
398 mutex_lock(&pcap
->adc_mutex
);
399 for (i
= 0; i
< PCAP_ADC_MAXQ
; i
++)
400 kfree(pcap
->adc_queue
[i
]);
401 mutex_unlock(&pcap
->adc_mutex
);
403 /* cleanup irqchip */
404 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
405 irq_set_chip_and_handler(i
, NULL
, NULL
);
407 destroy_workqueue(pcap
->workqueue
);
412 static int ezx_pcap_probe(struct spi_device
*spi
)
414 struct pcap_platform_data
*pdata
= dev_get_platdata(&spi
->dev
);
415 struct pcap_chip
*pcap
;
419 /* platform data is required */
423 pcap
= devm_kzalloc(&spi
->dev
, sizeof(*pcap
), GFP_KERNEL
);
429 mutex_init(&pcap
->io_mutex
);
430 mutex_init(&pcap
->adc_mutex
);
431 INIT_WORK(&pcap
->isr_work
, pcap_isr_work
);
432 INIT_WORK(&pcap
->msr_work
, pcap_msr_work
);
433 spi_set_drvdata(spi
, pcap
);
436 spi
->bits_per_word
= 32;
437 spi
->mode
= SPI_MODE_0
| (pdata
->config
& PCAP_CS_AH
? SPI_CS_HIGH
: 0);
438 ret
= spi_setup(spi
);
445 pcap
->irq_base
= pdata
->irq_base
;
446 pcap
->workqueue
= create_singlethread_workqueue("pcapd");
447 if (!pcap
->workqueue
) {
449 dev_err(&spi
->dev
, "can't create pcap thread\n");
453 /* redirect interrupts to AP, except adcdone2 */
454 if (!(pdata
->config
& PCAP_SECOND_PORT
))
455 ezx_pcap_write(pcap
, PCAP_REG_INT_SEL
,
456 (1 << PCAP_IRQ_ADCDONE2
));
459 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++) {
460 irq_set_chip_and_handler(i
, &pcap_irq_chip
, handle_simple_irq
);
461 irq_set_chip_data(i
, pcap
);
462 irq_clear_status_flags(i
, IRQ_NOREQUEST
| IRQ_NOPROBE
);
465 /* mask/ack all PCAP interrupts */
466 ezx_pcap_write(pcap
, PCAP_REG_MSR
, PCAP_MASK_ALL_INTERRUPT
);
467 ezx_pcap_write(pcap
, PCAP_REG_ISR
, PCAP_CLEAR_INTERRUPT_REGISTER
);
468 pcap
->msr
= PCAP_MASK_ALL_INTERRUPT
;
470 irq_set_irq_type(spi
->irq
, IRQ_TYPE_EDGE_RISING
);
471 irq_set_chained_handler_and_data(spi
->irq
, pcap_irq_handler
, pcap
);
472 irq_set_irq_wake(spi
->irq
, 1);
475 adc_irq
= pcap_to_irq(pcap
, (pdata
->config
& PCAP_SECOND_PORT
) ?
476 PCAP_IRQ_ADCDONE2
: PCAP_IRQ_ADCDONE
);
478 ret
= devm_request_irq(&spi
->dev
, adc_irq
, pcap_adc_irq
, 0, "ADC",
484 for (i
= 0; i
< pdata
->num_subdevs
; i
++) {
485 ret
= pcap_add_subdev(pcap
, &pdata
->subdevs
[i
]);
490 /* board specific quirks */
497 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
499 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
500 irq_set_chip_and_handler(i
, NULL
, NULL
);
501 /* destroy_workqueue: */
502 destroy_workqueue(pcap
->workqueue
);
507 static struct spi_driver ezxpcap_driver
= {
508 .probe
= ezx_pcap_probe
,
509 .remove
= ezx_pcap_remove
,
515 static int __init
ezx_pcap_init(void)
517 return spi_register_driver(&ezxpcap_driver
);
520 static void __exit
ezx_pcap_exit(void)
522 spi_unregister_driver(&ezxpcap_driver
);
525 subsys_initcall(ezx_pcap_init
);
526 module_exit(ezx_pcap_exit
);
528 MODULE_LICENSE("GPL");
529 MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
530 MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
531 MODULE_ALIAS("spi:ezx-pcap");