/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>

#define PCAP_ADC_MAXQ	8
struct pcap_adc_request {
        u8 bank;
        u8 ch[2];
        u32 flags;
        void (*callback)(void *, u16[]);
        void *data;
};

struct pcap_adc_sync_request {
        u16 res[2];
        struct completion completion;
};

struct pcap_chip {
        struct spi_device *spi;

        /* IO */
        u32 buf;
        struct mutex io_mutex;

        /* IRQ */
        unsigned int irq_base;
        u32 msr;
        struct work_struct isr_work;
        struct work_struct msr_work;
        struct workqueue_struct *workqueue;

        /* ADC */
        struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
        u8 adc_head;
        u8 adc_tail;
        struct mutex adc_mutex;
};

/* IO */
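/*
 * Each PCAP register access is a single full-duplex 32-bit SPI transfer:
 * the word shifted out carries the read/write opcode, the register address
 * and (for writes) the register value, and the word shifted back in carries
 * the result.  pcap->buf is used as the tx/rx buffer; callers serialize
 * access with io_mutex.
 */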
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
        struct spi_transfer t;
        struct spi_message m;
        int status;

        memset(&t, 0, sizeof t);
        spi_message_init(&m);
        t.len = sizeof(u32);
        spi_message_add_tail(&t, &m);

        pcap->buf = *data;
        t.tx_buf = (u8 *) &pcap->buf;
        t.rx_buf = (u8 *) &pcap->buf;
        status = spi_sync(pcap->spi, &m);

        if (status == 0)
                *data = pcap->buf;

        return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
        int ret;

        mutex_lock(&pcap->io_mutex);
        value &= PCAP_REGISTER_VALUE_MASK;
        value |= PCAP_REGISTER_WRITE_OP_BIT
                | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
        ret = ezx_pcap_putget(pcap, &value);
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
        int ret;

        mutex_lock(&pcap->io_mutex);
        *value = PCAP_REGISTER_READ_OP_BIT
                | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        ret = ezx_pcap_putget(pcap, value);
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);
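
/*
 * Read-modify-write helper: the register is read, the bits selected by
 * @mask are replaced with the corresponding bits of @val, and the result
 * is written back.  Both transfers happen under io_mutex so concurrent
 * callers cannot interleave between the read and the write.
 */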
int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
        int ret;
        u32 tmp = PCAP_REGISTER_READ_OP_BIT |
                (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        mutex_lock(&pcap->io_mutex);
        ret = ezx_pcap_putget(pcap, &tmp);
        if (ret)
                goto out_unlock;

        tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
        tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
                (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

/* IRQ */
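/*
 * The PCAP raises a single interrupt line towards the CPU; the chained
 * handler below demultiplexes it into one virtual IRQ per bit of the
 * ISR/MSR registers, starting at the irq_base supplied in the platform
 * data.  Because the registers are only reachable over SPI (which may
 * sleep), mask/unmask just update the cached MSR and defer the actual
 * register write to the "pcapd" workqueue.
 */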
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
        return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
        return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

static void pcap_mask_irq(unsigned int irq)
{
        struct pcap_chip *pcap = get_irq_chip_data(irq);

        pcap->msr |= 1 << irq_to_pcap(pcap, irq);
        queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(unsigned int irq)
{
        struct pcap_chip *pcap = get_irq_chip_data(irq);

        pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
        queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
        .name   = "pcap",
        .mask   = pcap_mask_irq,
        .unmask = pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
        struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

        ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
        struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
        struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
        u32 msr, isr, int_sel, service;
        int irq;

        do {
                ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
                ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

                /* We can't service/ack irqs that are assigned to port 2 */
                if (!(pdata->config & PCAP_SECOND_PORT)) {
                        ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
                        isr &= ~int_sel;
                }

                ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
                ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

                local_irq_disable();
                service = isr & ~msr;
                for (irq = pcap->irq_base; service; service >>= 1, irq++) {
                        if (service & 1) {
                                struct irq_desc *desc = irq_to_desc(irq);

                                if (WARN(!desc, KERN_WARNING
                                                "Invalid PCAP IRQ %d\n", irq))
                                        break;

                                if (desc->status & IRQ_DISABLED)
                                        note_interrupt(irq, desc, IRQ_NONE);
                                else
                                        desc->handle_irq(irq, desc);
                        }
                }
                local_irq_enable();
                ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
        } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
}

static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct pcap_chip *pcap = get_irq_data(irq);

        desc->chip->ack(irq);
        queue_work(pcap->workqueue, &pcap->isr_work);
}

/* ADC */
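/*
 * ADC requests are queued in the fixed-size adc_queue ring (PCAP_ADC_MAXQ
 * entries, head/tail wrapping with a power-of-two mask).  pcap_adc_trigger()
 * starts the conversion for the request at the head, the ADCDONE interrupt
 * reads back the two channel results and invokes the request's callback,
 * and the next queued request (if any) is then triggered.  When the queue
 * is empty the ADC is switched off to save power.
 */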
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
        u32 tmp;

        mutex_lock(&pcap->adc_mutex);
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
        u32 tmp;

        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_ADEN | PCAP_ADC_BATT_I_ADC | PCAP_ADC_BATT_I_POLARITY);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
        u32 tmp;
        u8 head;

        mutex_lock(&pcap->adc_mutex);
        head = pcap->adc_head;
        if (!pcap->adc_queue[head]) {
                /* queue is empty, save power */
                pcap_disable_adc(pcap);
                mutex_unlock(&pcap->adc_mutex);
                return;
        }
        /* start conversion on requested bank, save TS_M bits */
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

        if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
                tmp |= PCAP_ADC_AD_SEL1;

        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        mutex_unlock(&pcap->adc_mutex);
        ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
        struct pcap_chip *pcap = _pcap;
        struct pcap_adc_request *req;
        u16 res[2];
        u32 tmp;

        mutex_lock(&pcap->adc_mutex);
        req = pcap->adc_queue[pcap->adc_head];

        if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
                mutex_unlock(&pcap->adc_mutex);
                return IRQ_HANDLED;
        }

        /* read requested channels results */
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
        tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
        tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
        res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
        res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

        pcap->adc_queue[pcap->adc_head] = NULL;
        pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
        mutex_unlock(&pcap->adc_mutex);

        /* pass the results and release memory */
        req->callback(req->data, res);
        kfree(req);

        /* trigger next conversion (if any) on queue */
        pcap_adc_trigger(pcap);

        return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
                                                void *callback, void *data)
{
        struct pcap_adc_request *req;

        /* This will be freed after we have a result */
        req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->bank = bank;
        req->flags = flags;
        req->ch[0] = ch[0];
        req->ch[1] = ch[1];
        req->callback = callback;
        req->data = data;

        mutex_lock(&pcap->adc_mutex);
        if (pcap->adc_queue[pcap->adc_tail]) {
                mutex_unlock(&pcap->adc_mutex);
                kfree(req);
                return -EBUSY;
        }
        pcap->adc_queue[pcap->adc_tail] = req;
        pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
        mutex_unlock(&pcap->adc_mutex);

        /* start conversion */
        pcap_adc_trigger(pcap);

        return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
        struct pcap_adc_sync_request *req = param;

        req->res[0] = res[0];
        req->res[1] = res[1];
        complete(&req->completion);
}
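
/*
 * Blocking wrapper around pcap_adc_async(): the request is queued with
 * pcap_adc_sync_cb() as its callback and the caller sleeps on a completion
 * until both conversion results have been copied back into res[].
 */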
int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
                                                                u16 res[])
{
        struct pcap_adc_sync_request sync_data;
        int ret;

        init_completion(&sync_data.completion);
        ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
                                                                &sync_data);
        if (ret)
                return ret;
        wait_for_completion(&sync_data.completion);
        res[0] = sync_data.res[0];
        res[1] = sync_data.res[1];

        return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);

/* subdevs */
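/*
 * Each entry in pdata->subdevs is registered as a platform device whose
 * parent is the PCAP's SPI device, so ezx_pcap_remove() can tear them all
 * down again with device_for_each_child().
 */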
static int pcap_remove_subdev(struct device *dev, void *unused)
{
        platform_device_unregister(to_platform_device(dev));
        return 0;
}

static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
                                                struct pcap_subdev *subdev)
{
        struct platform_device *pdev;

        pdev = platform_device_alloc(subdev->name, subdev->id);
        if (!pdev)
                return -ENOMEM;

        pdev->dev.parent = &pcap->spi->dev;
        pdev->dev.platform_data = subdev->platform_data;

        return platform_device_add(pdev);
}

static int __devexit ezx_pcap_remove(struct spi_device *spi)
{
        struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
        struct pcap_platform_data *pdata = spi->dev.platform_data;
        int i, adc_irq;

        /* remove all registered subdevs */
        device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

        /* cleanup ADC */
        adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
                                PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
        free_irq(adc_irq, pcap);
        mutex_lock(&pcap->adc_mutex);
        for (i = 0; i < PCAP_ADC_MAXQ; i++)
                kfree(pcap->adc_queue[i]);
        mutex_unlock(&pcap->adc_mutex);

        /* cleanup irqchip */
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
                set_irq_chip_and_handler(i, NULL, NULL);

        destroy_workqueue(pcap->workqueue);

        kfree(pcap);

        return 0;
}
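
/*
 * Probe order: allocate and initialize the pcap_chip, configure the SPI
 * link (32-bit words, mode 0), create the "pcapd" workqueue, set up the
 * irq chip and the chained handler on the SPI device's interrupt, request
 * the ADC-done interrupt and finally register the subdevices from platform
 * data.  The labels at the end unwind the error paths in reverse order.
 */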
static int __devinit ezx_pcap_probe(struct spi_device *spi)
{
        struct pcap_platform_data *pdata = spi->dev.platform_data;
        struct pcap_chip *pcap;
        int i, adc_irq;
        int ret = -ENODEV;

        /* platform data is required */
        if (!pdata)
                goto ret;

        pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
        if (!pcap) {
                ret = -ENOMEM;
                goto ret;
        }

        mutex_init(&pcap->io_mutex);
        mutex_init(&pcap->adc_mutex);
        INIT_WORK(&pcap->isr_work, pcap_isr_work);
        INIT_WORK(&pcap->msr_work, pcap_msr_work);
        dev_set_drvdata(&spi->dev, pcap);

        /* setup spi */
        spi->bits_per_word = 32;
        spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
        ret = spi_setup(spi);
        if (ret)
                goto free_pcap;

        pcap->spi = spi;

        /* setup irq */
        pcap->irq_base = pdata->irq_base;
        pcap->workqueue = create_singlethread_workqueue("pcapd");
        if (!pcap->workqueue) {
                dev_err(&spi->dev, "can't create pcap thread\n");
                goto free_pcap;
        }

        /* redirect interrupts to AP, except adcdone2 */
        if (!(pdata->config & PCAP_SECOND_PORT))
                ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
                                        (1 << PCAP_IRQ_ADCDONE2));

        /* setup irq chip */
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
                set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
                set_irq_chip_data(i, pcap);
#ifdef CONFIG_ARM
                set_irq_flags(i, IRQF_VALID);
#else
                set_irq_noprobe(i);
#endif
        }

        /* mask/ack all PCAP interrupts */
        ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
        ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
        pcap->msr = PCAP_MASK_ALL_INTERRUPT;

        set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
        set_irq_data(spi->irq, pcap);
        set_irq_chained_handler(spi->irq, pcap_irq_handler);
        set_irq_wake(spi->irq, 1);

        /* ADC */
        adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
                                        PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

        ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
        if (ret)
                goto free_irqchip;

        /* setup subdevs */
        for (i = 0; i < pdata->num_subdevs; i++) {
                ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
                if (ret)
                        goto remove_subdevs;
        }

        /* board specific quirks */
        if (pdata->init)
                pdata->init(pcap);

        return 0;

remove_subdevs:
        device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* free_adc: */
        free_irq(adc_irq, pcap);
free_irqchip:
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
                set_irq_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
        destroy_workqueue(pcap->workqueue);
free_pcap:
        kfree(pcap);
ret:
        return ret;
}

static struct spi_driver ezxpcap_driver = {
        .probe  = ezx_pcap_probe,
        .remove = __devexit_p(ezx_pcap_remove),
        .driver = {
                .name   = "ezx-pcap",
                .owner  = THIS_MODULE,
        },
};

static int __init ezx_pcap_init(void)
{
        return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
        spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");