1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/platform_device.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/mfd/ezx-pcap.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
17 #include <linux/slab.h>
19 #define PCAP_ADC_MAXQ 8
20 struct pcap_adc_request
{
24 void (*callback
)(void *, u16
[]);
28 struct pcap_adc_sync_request
{
30 struct completion completion
;
34 struct spi_device
*spi
;
41 unsigned int irq_base
;
43 struct work_struct isr_work
;
44 struct work_struct msr_work
;
45 struct workqueue_struct
*workqueue
;
48 struct pcap_adc_request
*adc_queue
[PCAP_ADC_MAXQ
];
55 static int ezx_pcap_putget(struct pcap_chip
*pcap
, u32
*data
)
57 struct spi_transfer t
;
61 memset(&t
, 0, sizeof(t
));
64 spi_message_add_tail(&t
, &m
);
67 t
.tx_buf
= (u8
*) &pcap
->buf
;
68 t
.rx_buf
= (u8
*) &pcap
->buf
;
69 status
= spi_sync(pcap
->spi
, &m
);
77 int ezx_pcap_write(struct pcap_chip
*pcap
, u8 reg_num
, u32 value
)
82 spin_lock_irqsave(&pcap
->io_lock
, flags
);
83 value
&= PCAP_REGISTER_VALUE_MASK
;
84 value
|= PCAP_REGISTER_WRITE_OP_BIT
85 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
86 ret
= ezx_pcap_putget(pcap
, &value
);
87 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
91 EXPORT_SYMBOL_GPL(ezx_pcap_write
);
93 int ezx_pcap_read(struct pcap_chip
*pcap
, u8 reg_num
, u32
*value
)
98 spin_lock_irqsave(&pcap
->io_lock
, flags
);
99 *value
= PCAP_REGISTER_READ_OP_BIT
100 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
102 ret
= ezx_pcap_putget(pcap
, value
);
103 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
107 EXPORT_SYMBOL_GPL(ezx_pcap_read
);
109 int ezx_pcap_set_bits(struct pcap_chip
*pcap
, u8 reg_num
, u32 mask
, u32 val
)
113 u32 tmp
= PCAP_REGISTER_READ_OP_BIT
|
114 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
116 spin_lock_irqsave(&pcap
->io_lock
, flags
);
117 ret
= ezx_pcap_putget(pcap
, &tmp
);
121 tmp
&= (PCAP_REGISTER_VALUE_MASK
& ~mask
);
122 tmp
|= (val
& mask
) | PCAP_REGISTER_WRITE_OP_BIT
|
123 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
125 ret
= ezx_pcap_putget(pcap
, &tmp
);
127 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
131 EXPORT_SYMBOL_GPL(ezx_pcap_set_bits
);
134 int irq_to_pcap(struct pcap_chip
*pcap
, int irq
)
136 return irq
- pcap
->irq_base
;
138 EXPORT_SYMBOL_GPL(irq_to_pcap
);
140 int pcap_to_irq(struct pcap_chip
*pcap
, int irq
)
142 return pcap
->irq_base
+ irq
;
144 EXPORT_SYMBOL_GPL(pcap_to_irq
);
146 static void pcap_mask_irq(struct irq_data
*d
)
148 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
150 pcap
->msr
|= 1 << irq_to_pcap(pcap
, d
->irq
);
151 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
154 static void pcap_unmask_irq(struct irq_data
*d
)
156 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
158 pcap
->msr
&= ~(1 << irq_to_pcap(pcap
, d
->irq
));
159 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
162 static struct irq_chip pcap_irq_chip
= {
164 .irq_disable
= pcap_mask_irq
,
165 .irq_mask
= pcap_mask_irq
,
166 .irq_unmask
= pcap_unmask_irq
,
169 static void pcap_msr_work(struct work_struct
*work
)
171 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, msr_work
);
173 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
176 static void pcap_isr_work(struct work_struct
*work
)
178 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, isr_work
);
179 struct pcap_platform_data
*pdata
= dev_get_platdata(&pcap
->spi
->dev
);
180 u32 msr
, isr
, int_sel
, service
;
184 ezx_pcap_read(pcap
, PCAP_REG_MSR
, &msr
);
185 ezx_pcap_read(pcap
, PCAP_REG_ISR
, &isr
);
187 /* We can't service/ack irqs that are assigned to port 2 */
188 if (!(pdata
->config
& PCAP_SECOND_PORT
)) {
189 ezx_pcap_read(pcap
, PCAP_REG_INT_SEL
, &int_sel
);
193 ezx_pcap_write(pcap
, PCAP_REG_MSR
, isr
| msr
);
194 ezx_pcap_write(pcap
, PCAP_REG_ISR
, isr
);
196 service
= isr
& ~msr
;
197 for (irq
= pcap
->irq_base
; service
; service
>>= 1, irq
++) {
199 generic_handle_irq_safe(irq
);
201 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
202 } while (gpio_get_value(pdata
->gpio
));
205 static void pcap_irq_handler(struct irq_desc
*desc
)
207 struct pcap_chip
*pcap
= irq_desc_get_handler_data(desc
);
209 desc
->irq_data
.chip
->irq_ack(&desc
->irq_data
);
210 queue_work(pcap
->workqueue
, &pcap
->isr_work
);
214 void pcap_set_ts_bits(struct pcap_chip
*pcap
, u32 bits
)
219 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
220 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
221 tmp
&= ~(PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
222 tmp
|= bits
& (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
223 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
224 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
226 EXPORT_SYMBOL_GPL(pcap_set_ts_bits
);
228 static void pcap_disable_adc(struct pcap_chip
*pcap
)
232 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
233 tmp
&= ~(PCAP_ADC_ADEN
|PCAP_ADC_BATT_I_ADC
|PCAP_ADC_BATT_I_POLARITY
);
234 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
237 static void pcap_adc_trigger(struct pcap_chip
*pcap
)
243 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
244 head
= pcap
->adc_head
;
245 if (!pcap
->adc_queue
[head
]) {
246 /* queue is empty, save power */
247 pcap_disable_adc(pcap
);
248 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
251 /* start conversion on requested bank, save TS_M bits */
252 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
253 tmp
&= (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
254 tmp
|= pcap
->adc_queue
[head
]->flags
| PCAP_ADC_ADEN
;
256 if (pcap
->adc_queue
[head
]->bank
== PCAP_ADC_BANK_1
)
257 tmp
|= PCAP_ADC_AD_SEL1
;
259 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
260 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
261 ezx_pcap_write(pcap
, PCAP_REG_ADR
, PCAP_ADR_ASC
);
264 static irqreturn_t
pcap_adc_irq(int irq
, void *_pcap
)
266 struct pcap_chip
*pcap
= _pcap
;
267 struct pcap_adc_request
*req
;
271 spin_lock(&pcap
->adc_lock
);
272 req
= pcap
->adc_queue
[pcap
->adc_head
];
274 if (WARN(!req
, "adc irq without pending request\n")) {
275 spin_unlock(&pcap
->adc_lock
);
279 /* read requested channels results */
280 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
281 tmp
&= ~(PCAP_ADC_ADA1_MASK
| PCAP_ADC_ADA2_MASK
);
282 tmp
|= (req
->ch
[0] << PCAP_ADC_ADA1_SHIFT
);
283 tmp
|= (req
->ch
[1] << PCAP_ADC_ADA2_SHIFT
);
284 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
285 ezx_pcap_read(pcap
, PCAP_REG_ADR
, &tmp
);
286 res
[0] = (tmp
& PCAP_ADR_ADD1_MASK
) >> PCAP_ADR_ADD1_SHIFT
;
287 res
[1] = (tmp
& PCAP_ADR_ADD2_MASK
) >> PCAP_ADR_ADD2_SHIFT
;
289 pcap
->adc_queue
[pcap
->adc_head
] = NULL
;
290 pcap
->adc_head
= (pcap
->adc_head
+ 1) & (PCAP_ADC_MAXQ
- 1);
291 spin_unlock(&pcap
->adc_lock
);
293 /* pass the results and release memory */
294 req
->callback(req
->data
, res
);
297 /* trigger next conversion (if any) on queue */
298 pcap_adc_trigger(pcap
);
303 int pcap_adc_async(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
304 void *callback
, void *data
)
306 struct pcap_adc_request
*req
;
307 unsigned long irq_flags
;
309 /* This will be freed after we have a result */
310 req
= kmalloc(sizeof(struct pcap_adc_request
), GFP_KERNEL
);
318 req
->callback
= callback
;
321 spin_lock_irqsave(&pcap
->adc_lock
, irq_flags
);
322 if (pcap
->adc_queue
[pcap
->adc_tail
]) {
323 spin_unlock_irqrestore(&pcap
->adc_lock
, irq_flags
);
327 pcap
->adc_queue
[pcap
->adc_tail
] = req
;
328 pcap
->adc_tail
= (pcap
->adc_tail
+ 1) & (PCAP_ADC_MAXQ
- 1);
329 spin_unlock_irqrestore(&pcap
->adc_lock
, irq_flags
);
331 /* start conversion */
332 pcap_adc_trigger(pcap
);
336 EXPORT_SYMBOL_GPL(pcap_adc_async
);
338 static void pcap_adc_sync_cb(void *param
, u16 res
[])
340 struct pcap_adc_sync_request
*req
= param
;
342 req
->res
[0] = res
[0];
343 req
->res
[1] = res
[1];
344 complete(&req
->completion
);
347 int pcap_adc_sync(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
350 struct pcap_adc_sync_request sync_data
;
353 init_completion(&sync_data
.completion
);
354 ret
= pcap_adc_async(pcap
, bank
, flags
, ch
, pcap_adc_sync_cb
,
358 wait_for_completion(&sync_data
.completion
);
359 res
[0] = sync_data
.res
[0];
360 res
[1] = sync_data
.res
[1];
364 EXPORT_SYMBOL_GPL(pcap_adc_sync
);
/* subdevs */
/* device_for_each_child() helper: unregister one child platform device. */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}
373 static int pcap_add_subdev(struct pcap_chip
*pcap
,
374 struct pcap_subdev
*subdev
)
376 struct platform_device
*pdev
;
379 pdev
= platform_device_alloc(subdev
->name
, subdev
->id
);
383 pdev
->dev
.parent
= &pcap
->spi
->dev
;
384 pdev
->dev
.platform_data
= subdev
->platform_data
;
386 ret
= platform_device_add(pdev
);
388 platform_device_put(pdev
);
393 static void ezx_pcap_remove(struct spi_device
*spi
)
395 struct pcap_chip
*pcap
= spi_get_drvdata(spi
);
399 /* remove all registered subdevs */
400 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
403 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
404 for (i
= 0; i
< PCAP_ADC_MAXQ
; i
++)
405 kfree(pcap
->adc_queue
[i
]);
406 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
408 /* cleanup irqchip */
409 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
410 irq_set_chip_and_handler(i
, NULL
, NULL
);
412 destroy_workqueue(pcap
->workqueue
);
415 static int ezx_pcap_probe(struct spi_device
*spi
)
417 struct pcap_platform_data
*pdata
= dev_get_platdata(&spi
->dev
);
418 struct pcap_chip
*pcap
;
422 /* platform data is required */
426 pcap
= devm_kzalloc(&spi
->dev
, sizeof(*pcap
), GFP_KERNEL
);
432 spin_lock_init(&pcap
->io_lock
);
433 spin_lock_init(&pcap
->adc_lock
);
434 INIT_WORK(&pcap
->isr_work
, pcap_isr_work
);
435 INIT_WORK(&pcap
->msr_work
, pcap_msr_work
);
436 spi_set_drvdata(spi
, pcap
);
439 spi
->bits_per_word
= 32;
440 spi
->mode
= SPI_MODE_0
| (pdata
->config
& PCAP_CS_AH
? SPI_CS_HIGH
: 0);
441 ret
= spi_setup(spi
);
448 pcap
->irq_base
= pdata
->irq_base
;
449 pcap
->workqueue
= create_singlethread_workqueue("pcapd");
450 if (!pcap
->workqueue
) {
452 dev_err(&spi
->dev
, "can't create pcap thread\n");
456 /* redirect interrupts to AP, except adcdone2 */
457 if (!(pdata
->config
& PCAP_SECOND_PORT
))
458 ezx_pcap_write(pcap
, PCAP_REG_INT_SEL
,
459 (1 << PCAP_IRQ_ADCDONE2
));
462 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++) {
463 irq_set_chip_and_handler(i
, &pcap_irq_chip
, handle_simple_irq
);
464 irq_set_chip_data(i
, pcap
);
465 irq_clear_status_flags(i
, IRQ_NOREQUEST
| IRQ_NOPROBE
);
468 /* mask/ack all PCAP interrupts */
469 ezx_pcap_write(pcap
, PCAP_REG_MSR
, PCAP_MASK_ALL_INTERRUPT
);
470 ezx_pcap_write(pcap
, PCAP_REG_ISR
, PCAP_CLEAR_INTERRUPT_REGISTER
);
471 pcap
->msr
= PCAP_MASK_ALL_INTERRUPT
;
473 irq_set_irq_type(spi
->irq
, IRQ_TYPE_EDGE_RISING
);
474 irq_set_chained_handler_and_data(spi
->irq
, pcap_irq_handler
, pcap
);
475 irq_set_irq_wake(spi
->irq
, 1);
478 adc_irq
= pcap_to_irq(pcap
, (pdata
->config
& PCAP_SECOND_PORT
) ?
479 PCAP_IRQ_ADCDONE2
: PCAP_IRQ_ADCDONE
);
481 ret
= devm_request_irq(&spi
->dev
, adc_irq
, pcap_adc_irq
, 0, "ADC",
487 for (i
= 0; i
< pdata
->num_subdevs
; i
++) {
488 ret
= pcap_add_subdev(pcap
, &pdata
->subdevs
[i
]);
493 /* board specific quirks */
500 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
502 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
503 irq_set_chip_and_handler(i
, NULL
, NULL
);
504 /* destroy_workqueue: */
505 destroy_workqueue(pcap
->workqueue
);
510 static struct spi_driver ezxpcap_driver
= {
511 .probe
= ezx_pcap_probe
,
512 .remove
= ezx_pcap_remove
,
518 static int __init
ezx_pcap_init(void)
520 return spi_register_driver(&ezxpcap_driver
);
523 static void __exit
ezx_pcap_exit(void)
525 spi_unregister_driver(&ezxpcap_driver
);
528 subsys_initcall(ezx_pcap_init
);
529 module_exit(ezx_pcap_exit
);
531 MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
532 MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
533 MODULE_ALIAS("spi:ezx-pcap");