// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/platform_device.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/mfd/ezx-pcap.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
17 #include <linux/slab.h>
19 #define PCAP_ADC_MAXQ 8
20 struct pcap_adc_request
{
24 void (*callback
)(void *, u16
[]);
28 struct pcap_adc_sync_request
{
30 struct completion completion
;
34 struct spi_device
*spi
;
41 unsigned int irq_base
;
43 struct work_struct isr_work
;
44 struct work_struct msr_work
;
45 struct workqueue_struct
*workqueue
;
48 struct pcap_adc_request
*adc_queue
[PCAP_ADC_MAXQ
];
55 static int ezx_pcap_putget(struct pcap_chip
*pcap
, u32
*data
)
57 struct spi_transfer t
;
61 memset(&t
, 0, sizeof(t
));
64 spi_message_add_tail(&t
, &m
);
67 t
.tx_buf
= (u8
*) &pcap
->buf
;
68 t
.rx_buf
= (u8
*) &pcap
->buf
;
69 status
= spi_sync(pcap
->spi
, &m
);
77 int ezx_pcap_write(struct pcap_chip
*pcap
, u8 reg_num
, u32 value
)
82 spin_lock_irqsave(&pcap
->io_lock
, flags
);
83 value
&= PCAP_REGISTER_VALUE_MASK
;
84 value
|= PCAP_REGISTER_WRITE_OP_BIT
85 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
86 ret
= ezx_pcap_putget(pcap
, &value
);
87 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
91 EXPORT_SYMBOL_GPL(ezx_pcap_write
);
93 int ezx_pcap_read(struct pcap_chip
*pcap
, u8 reg_num
, u32
*value
)
98 spin_lock_irqsave(&pcap
->io_lock
, flags
);
99 *value
= PCAP_REGISTER_READ_OP_BIT
100 | (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
102 ret
= ezx_pcap_putget(pcap
, value
);
103 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
107 EXPORT_SYMBOL_GPL(ezx_pcap_read
);
109 int ezx_pcap_set_bits(struct pcap_chip
*pcap
, u8 reg_num
, u32 mask
, u32 val
)
113 u32 tmp
= PCAP_REGISTER_READ_OP_BIT
|
114 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
116 spin_lock_irqsave(&pcap
->io_lock
, flags
);
117 ret
= ezx_pcap_putget(pcap
, &tmp
);
121 tmp
&= (PCAP_REGISTER_VALUE_MASK
& ~mask
);
122 tmp
|= (val
& mask
) | PCAP_REGISTER_WRITE_OP_BIT
|
123 (reg_num
<< PCAP_REGISTER_ADDRESS_SHIFT
);
125 ret
= ezx_pcap_putget(pcap
, &tmp
);
127 spin_unlock_irqrestore(&pcap
->io_lock
, flags
);
131 EXPORT_SYMBOL_GPL(ezx_pcap_set_bits
);
134 int irq_to_pcap(struct pcap_chip
*pcap
, int irq
)
136 return irq
- pcap
->irq_base
;
138 EXPORT_SYMBOL_GPL(irq_to_pcap
);
140 int pcap_to_irq(struct pcap_chip
*pcap
, int irq
)
142 return pcap
->irq_base
+ irq
;
144 EXPORT_SYMBOL_GPL(pcap_to_irq
);
146 static void pcap_mask_irq(struct irq_data
*d
)
148 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
150 pcap
->msr
|= 1 << irq_to_pcap(pcap
, d
->irq
);
151 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
154 static void pcap_unmask_irq(struct irq_data
*d
)
156 struct pcap_chip
*pcap
= irq_data_get_irq_chip_data(d
);
158 pcap
->msr
&= ~(1 << irq_to_pcap(pcap
, d
->irq
));
159 queue_work(pcap
->workqueue
, &pcap
->msr_work
);
162 static struct irq_chip pcap_irq_chip
= {
164 .irq_disable
= pcap_mask_irq
,
165 .irq_mask
= pcap_mask_irq
,
166 .irq_unmask
= pcap_unmask_irq
,
169 static void pcap_msr_work(struct work_struct
*work
)
171 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, msr_work
);
173 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
176 static void pcap_isr_work(struct work_struct
*work
)
178 struct pcap_chip
*pcap
= container_of(work
, struct pcap_chip
, isr_work
);
179 struct pcap_platform_data
*pdata
= dev_get_platdata(&pcap
->spi
->dev
);
180 u32 msr
, isr
, int_sel
, service
;
184 ezx_pcap_read(pcap
, PCAP_REG_MSR
, &msr
);
185 ezx_pcap_read(pcap
, PCAP_REG_ISR
, &isr
);
187 /* We can't service/ack irqs that are assigned to port 2 */
188 if (!(pdata
->config
& PCAP_SECOND_PORT
)) {
189 ezx_pcap_read(pcap
, PCAP_REG_INT_SEL
, &int_sel
);
193 ezx_pcap_write(pcap
, PCAP_REG_MSR
, isr
| msr
);
194 ezx_pcap_write(pcap
, PCAP_REG_ISR
, isr
);
197 service
= isr
& ~msr
;
198 for (irq
= pcap
->irq_base
; service
; service
>>= 1, irq
++) {
200 generic_handle_irq(irq
);
203 ezx_pcap_write(pcap
, PCAP_REG_MSR
, pcap
->msr
);
204 } while (gpio_get_value(pdata
->gpio
));
207 static void pcap_irq_handler(struct irq_desc
*desc
)
209 struct pcap_chip
*pcap
= irq_desc_get_handler_data(desc
);
211 desc
->irq_data
.chip
->irq_ack(&desc
->irq_data
);
212 queue_work(pcap
->workqueue
, &pcap
->isr_work
);
216 void pcap_set_ts_bits(struct pcap_chip
*pcap
, u32 bits
)
221 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
222 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
223 tmp
&= ~(PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
224 tmp
|= bits
& (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
225 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
226 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
228 EXPORT_SYMBOL_GPL(pcap_set_ts_bits
);
230 static void pcap_disable_adc(struct pcap_chip
*pcap
)
234 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
235 tmp
&= ~(PCAP_ADC_ADEN
|PCAP_ADC_BATT_I_ADC
|PCAP_ADC_BATT_I_POLARITY
);
236 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
239 static void pcap_adc_trigger(struct pcap_chip
*pcap
)
245 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
246 head
= pcap
->adc_head
;
247 if (!pcap
->adc_queue
[head
]) {
248 /* queue is empty, save power */
249 pcap_disable_adc(pcap
);
250 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
253 /* start conversion on requested bank, save TS_M bits */
254 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
255 tmp
&= (PCAP_ADC_TS_M_MASK
| PCAP_ADC_TS_REF_LOWPWR
);
256 tmp
|= pcap
->adc_queue
[head
]->flags
| PCAP_ADC_ADEN
;
258 if (pcap
->adc_queue
[head
]->bank
== PCAP_ADC_BANK_1
)
259 tmp
|= PCAP_ADC_AD_SEL1
;
261 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
262 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
263 ezx_pcap_write(pcap
, PCAP_REG_ADR
, PCAP_ADR_ASC
);
266 static irqreturn_t
pcap_adc_irq(int irq
, void *_pcap
)
268 struct pcap_chip
*pcap
= _pcap
;
269 struct pcap_adc_request
*req
;
273 spin_lock(&pcap
->adc_lock
);
274 req
= pcap
->adc_queue
[pcap
->adc_head
];
276 if (WARN(!req
, "adc irq without pending request\n")) {
277 spin_unlock(&pcap
->adc_lock
);
281 /* read requested channels results */
282 ezx_pcap_read(pcap
, PCAP_REG_ADC
, &tmp
);
283 tmp
&= ~(PCAP_ADC_ADA1_MASK
| PCAP_ADC_ADA2_MASK
);
284 tmp
|= (req
->ch
[0] << PCAP_ADC_ADA1_SHIFT
);
285 tmp
|= (req
->ch
[1] << PCAP_ADC_ADA2_SHIFT
);
286 ezx_pcap_write(pcap
, PCAP_REG_ADC
, tmp
);
287 ezx_pcap_read(pcap
, PCAP_REG_ADR
, &tmp
);
288 res
[0] = (tmp
& PCAP_ADR_ADD1_MASK
) >> PCAP_ADR_ADD1_SHIFT
;
289 res
[1] = (tmp
& PCAP_ADR_ADD2_MASK
) >> PCAP_ADR_ADD2_SHIFT
;
291 pcap
->adc_queue
[pcap
->adc_head
] = NULL
;
292 pcap
->adc_head
= (pcap
->adc_head
+ 1) & (PCAP_ADC_MAXQ
- 1);
293 spin_unlock(&pcap
->adc_lock
);
295 /* pass the results and release memory */
296 req
->callback(req
->data
, res
);
299 /* trigger next conversion (if any) on queue */
300 pcap_adc_trigger(pcap
);
305 int pcap_adc_async(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
306 void *callback
, void *data
)
308 struct pcap_adc_request
*req
;
309 unsigned long irq_flags
;
311 /* This will be freed after we have a result */
312 req
= kmalloc(sizeof(struct pcap_adc_request
), GFP_KERNEL
);
320 req
->callback
= callback
;
323 spin_lock_irqsave(&pcap
->adc_lock
, irq_flags
);
324 if (pcap
->adc_queue
[pcap
->adc_tail
]) {
325 spin_unlock_irqrestore(&pcap
->adc_lock
, irq_flags
);
329 pcap
->adc_queue
[pcap
->adc_tail
] = req
;
330 pcap
->adc_tail
= (pcap
->adc_tail
+ 1) & (PCAP_ADC_MAXQ
- 1);
331 spin_unlock_irqrestore(&pcap
->adc_lock
, irq_flags
);
333 /* start conversion */
334 pcap_adc_trigger(pcap
);
338 EXPORT_SYMBOL_GPL(pcap_adc_async
);
340 static void pcap_adc_sync_cb(void *param
, u16 res
[])
342 struct pcap_adc_sync_request
*req
= param
;
344 req
->res
[0] = res
[0];
345 req
->res
[1] = res
[1];
346 complete(&req
->completion
);
349 int pcap_adc_sync(struct pcap_chip
*pcap
, u8 bank
, u32 flags
, u8 ch
[],
352 struct pcap_adc_sync_request sync_data
;
355 init_completion(&sync_data
.completion
);
356 ret
= pcap_adc_async(pcap
, bank
, flags
, ch
, pcap_adc_sync_cb
,
360 wait_for_completion(&sync_data
.completion
);
361 res
[0] = sync_data
.res
[0];
362 res
[1] = sync_data
.res
[1];
366 EXPORT_SYMBOL_GPL(pcap_adc_sync
);
/* device_for_each_child() helper: unregister one subdevice. */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
375 static int pcap_add_subdev(struct pcap_chip
*pcap
,
376 struct pcap_subdev
*subdev
)
378 struct platform_device
*pdev
;
381 pdev
= platform_device_alloc(subdev
->name
, subdev
->id
);
385 pdev
->dev
.parent
= &pcap
->spi
->dev
;
386 pdev
->dev
.platform_data
= subdev
->platform_data
;
388 ret
= platform_device_add(pdev
);
390 platform_device_put(pdev
);
395 static int ezx_pcap_remove(struct spi_device
*spi
)
397 struct pcap_chip
*pcap
= spi_get_drvdata(spi
);
401 /* remove all registered subdevs */
402 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
405 spin_lock_irqsave(&pcap
->adc_lock
, flags
);
406 for (i
= 0; i
< PCAP_ADC_MAXQ
; i
++)
407 kfree(pcap
->adc_queue
[i
]);
408 spin_unlock_irqrestore(&pcap
->adc_lock
, flags
);
410 /* cleanup irqchip */
411 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
412 irq_set_chip_and_handler(i
, NULL
, NULL
);
414 destroy_workqueue(pcap
->workqueue
);
419 static int ezx_pcap_probe(struct spi_device
*spi
)
421 struct pcap_platform_data
*pdata
= dev_get_platdata(&spi
->dev
);
422 struct pcap_chip
*pcap
;
426 /* platform data is required */
430 pcap
= devm_kzalloc(&spi
->dev
, sizeof(*pcap
), GFP_KERNEL
);
436 spin_lock_init(&pcap
->io_lock
);
437 spin_lock_init(&pcap
->adc_lock
);
438 INIT_WORK(&pcap
->isr_work
, pcap_isr_work
);
439 INIT_WORK(&pcap
->msr_work
, pcap_msr_work
);
440 spi_set_drvdata(spi
, pcap
);
443 spi
->bits_per_word
= 32;
444 spi
->mode
= SPI_MODE_0
| (pdata
->config
& PCAP_CS_AH
? SPI_CS_HIGH
: 0);
445 ret
= spi_setup(spi
);
452 pcap
->irq_base
= pdata
->irq_base
;
453 pcap
->workqueue
= create_singlethread_workqueue("pcapd");
454 if (!pcap
->workqueue
) {
456 dev_err(&spi
->dev
, "can't create pcap thread\n");
460 /* redirect interrupts to AP, except adcdone2 */
461 if (!(pdata
->config
& PCAP_SECOND_PORT
))
462 ezx_pcap_write(pcap
, PCAP_REG_INT_SEL
,
463 (1 << PCAP_IRQ_ADCDONE2
));
466 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++) {
467 irq_set_chip_and_handler(i
, &pcap_irq_chip
, handle_simple_irq
);
468 irq_set_chip_data(i
, pcap
);
469 irq_clear_status_flags(i
, IRQ_NOREQUEST
| IRQ_NOPROBE
);
472 /* mask/ack all PCAP interrupts */
473 ezx_pcap_write(pcap
, PCAP_REG_MSR
, PCAP_MASK_ALL_INTERRUPT
);
474 ezx_pcap_write(pcap
, PCAP_REG_ISR
, PCAP_CLEAR_INTERRUPT_REGISTER
);
475 pcap
->msr
= PCAP_MASK_ALL_INTERRUPT
;
477 irq_set_irq_type(spi
->irq
, IRQ_TYPE_EDGE_RISING
);
478 irq_set_chained_handler_and_data(spi
->irq
, pcap_irq_handler
, pcap
);
479 irq_set_irq_wake(spi
->irq
, 1);
482 adc_irq
= pcap_to_irq(pcap
, (pdata
->config
& PCAP_SECOND_PORT
) ?
483 PCAP_IRQ_ADCDONE2
: PCAP_IRQ_ADCDONE
);
485 ret
= devm_request_irq(&spi
->dev
, adc_irq
, pcap_adc_irq
, 0, "ADC",
491 for (i
= 0; i
< pdata
->num_subdevs
; i
++) {
492 ret
= pcap_add_subdev(pcap
, &pdata
->subdevs
[i
]);
497 /* board specific quirks */
504 device_for_each_child(&spi
->dev
, NULL
, pcap_remove_subdev
);
506 for (i
= pcap
->irq_base
; i
< (pcap
->irq_base
+ PCAP_NIRQS
); i
++)
507 irq_set_chip_and_handler(i
, NULL
, NULL
);
508 /* destroy_workqueue: */
509 destroy_workqueue(pcap
->workqueue
);
514 static struct spi_driver ezxpcap_driver
= {
515 .probe
= ezx_pcap_probe
,
516 .remove
= ezx_pcap_remove
,
522 static int __init
ezx_pcap_init(void)
524 return spi_register_driver(&ezxpcap_driver
);
527 static void __exit
ezx_pcap_exit(void)
529 spi_unregister_driver(&ezxpcap_driver
);
532 subsys_initcall(ezx_pcap_init
);
533 module_exit(ezx_pcap_exit
);
535 MODULE_LICENSE("GPL");
536 MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
537 MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
538 MODULE_ALIAS("spi:ezx-pcap");