/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8
struct pcap_adc_request {
        u8 bank;
        u8 ch[2];
        u32 flags;
        void (*callback)(void *, u16[]);
        void *data;
};

struct pcap_adc_sync_request {
        u16 res[2];
        struct completion completion;
};

struct pcap_chip {
        struct spi_device *spi;

        /* IO */
        u32 buf;
        struct mutex io_mutex;

        /* IRQ */
        unsigned int irq_base;
        u32 msr;
        struct work_struct isr_work;
        struct work_struct msr_work;
        struct workqueue_struct *workqueue;

        /* ADC */
        struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
        u8 adc_head;
        u8 adc_tail;
        struct mutex adc_mutex;
};

/* IO */
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
        struct spi_transfer t;
        struct spi_message m;
        int status;

        memset(&t, 0, sizeof(t));
        spi_message_init(&m);
        t.len = sizeof(u32);
        spi_message_add_tail(&t, &m);

        pcap->buf = *data;
        t.tx_buf = (u8 *) &pcap->buf;
        t.rx_buf = (u8 *) &pcap->buf;
        status = spi_sync(pcap->spi, &m);

        if (status == 0)
                *data = pcap->buf;

        return status;
}
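
/*
 * Register accesses are single 32-bit SPI transfers: the word carries a
 * read or write opcode bit (PCAP_REGISTER_READ_OP_BIT /
 * PCAP_REGISTER_WRITE_OP_BIT), the register address shifted by
 * PCAP_REGISTER_ADDRESS_SHIFT, and the register value masked by
 * PCAP_REGISTER_VALUE_MASK.  The exact bit layout is defined in
 * <linux/mfd/ezx-pcap.h>.
 */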

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
        int ret;

        mutex_lock(&pcap->io_mutex);
        value &= PCAP_REGISTER_VALUE_MASK;
        value |= PCAP_REGISTER_WRITE_OP_BIT
                | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
        ret = ezx_pcap_putget(pcap, &value);
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
        int ret;

        mutex_lock(&pcap->io_mutex);
        *value = PCAP_REGISTER_READ_OP_BIT
                | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        ret = ezx_pcap_putget(pcap, value);
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
        int ret;
        u32 tmp = PCAP_REGISTER_READ_OP_BIT |
                (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        mutex_lock(&pcap->io_mutex);
        ret = ezx_pcap_putget(pcap, &tmp);
        if (ret)
                goto out_unlock;

        tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
        tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
                (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

        ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
        mutex_unlock(&pcap->io_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);
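
/*
 * Illustrative only: a PCAP subdevice driver (which typically looks up the
 * pcap_chip pointer through its parent device) can use the helpers above
 * for read and read-modify-write access.  PCAP_REG_FOO and
 * PCAP_FOO_SOME_BIT are placeholders; the real names are in
 * <linux/mfd/ezx-pcap.h>.
 *
 *	u32 val;
 *
 *	ezx_pcap_read(pcap, PCAP_REG_FOO, &val);
 *	ezx_pcap_set_bits(pcap, PCAP_REG_FOO, PCAP_FOO_SOME_BIT,
 *			  PCAP_FOO_SOME_BIT);
 */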

/* IRQ */
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
        return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
        return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);
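
/*
 * The mask/unmask callbacks only update the cached mask (pcap->msr) and
 * defer the actual PCAP_REG_MSR write to msr_work: irq_chip callbacks can
 * run in atomic context, while the SPI transfer may sleep.
 */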

static void pcap_mask_irq(struct irq_data *d)
{
        struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

        pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
        queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
        struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

        pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
        queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
        .name		= "pcap",
        .irq_disable	= pcap_mask_irq,
        .irq_mask	= pcap_mask_irq,
        .irq_unmask	= pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
        struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

        ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}
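
/*
 * isr_work demultiplexes the PCAP interrupt: it reads ISR/MSR over SPI,
 * masks and acks the bits it is about to service, dispatches the
 * corresponding nested irqs, and repeats while the PCAP irq gpio
 * (pdata->gpio) still reads high.
 */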

static void pcap_isr_work(struct work_struct *work)
{
        struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
        struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
        u32 msr, isr, int_sel, service;
        int irq;

        do {
                ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
                ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

                /* We can't service/ack irqs that are assigned to port 2 */
                if (!(pdata->config & PCAP_SECOND_PORT)) {
                        ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
                        isr &= ~int_sel;
                }

                ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
                ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

                local_irq_disable();
                service = isr & ~msr;
                for (irq = pcap->irq_base; service; service >>= 1, irq++) {
                        if (service & 1)
                                generic_handle_irq(irq);
                }
                local_irq_enable();
                ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
        } while (gpio_get_value(pdata->gpio));
}

static void pcap_irq_handler(struct irq_desc *desc)
{
        struct pcap_chip *pcap = irq_desc_get_handler_data(desc);

        desc->irq_data.chip->irq_ack(&desc->irq_data);
        queue_work(pcap->workqueue, &pcap->isr_work);
}

/* ADC */
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
        u32 tmp;

        mutex_lock(&pcap->adc_mutex);
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
        u32 tmp;

        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}
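
/*
 * ADC conversions are queued in adc_queue[], a PCAP_ADC_MAXQ-deep ring
 * buffer (a power of two, so head/tail wrap with "& (PCAP_ADC_MAXQ - 1)").
 * pcap_adc_trigger() starts the conversion at the head; the ADCDONE irq
 * handler collects the result and triggers the next queued request.
 */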

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
        u32 tmp;
        u8 head;

        mutex_lock(&pcap->adc_mutex);
        head = pcap->adc_head;
        if (!pcap->adc_queue[head]) {
                /* queue is empty, save power */
                pcap_disable_adc(pcap);
                mutex_unlock(&pcap->adc_mutex);
                return;
        }
        /* start conversion on requested bank, save TS_M bits */
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
        tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

        if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
                tmp |= PCAP_ADC_AD_SEL1;

        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        mutex_unlock(&pcap->adc_mutex);
        ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
        struct pcap_chip *pcap = _pcap;
        struct pcap_adc_request *req;
        u16 res[2];
        u32 tmp;

        mutex_lock(&pcap->adc_mutex);
        req = pcap->adc_queue[pcap->adc_head];

        if (WARN(!req, "adc irq without pending request\n")) {
                mutex_unlock(&pcap->adc_mutex);
                return IRQ_HANDLED;
        }

        /* read requested channels results */
        ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
        tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
        tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
        tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
        ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
        ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
        res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
        res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

        pcap->adc_queue[pcap->adc_head] = NULL;
        pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
        mutex_unlock(&pcap->adc_mutex);

        /* pass the results and release memory */
        req->callback(req->data, res);
        kfree(req);

        /* trigger next conversion (if any) on queue */
        pcap_adc_trigger(pcap);

        return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
                   void *callback, void *data)
{
        struct pcap_adc_request *req;

        /* This will be freed after we have a result */
        req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->bank = bank;
        req->flags = flags;
        req->ch[0] = ch[0];
        req->ch[1] = ch[1];
        req->callback = callback;
        req->data = data;

        mutex_lock(&pcap->adc_mutex);
        if (pcap->adc_queue[pcap->adc_tail]) {
                mutex_unlock(&pcap->adc_mutex);
                kfree(req);
                return -EBUSY;
        }
        pcap->adc_queue[pcap->adc_tail] = req;
        pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
        mutex_unlock(&pcap->adc_mutex);

        /* start conversion */
        pcap_adc_trigger(pcap);

        return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
        struct pcap_adc_sync_request *req = param;

        req->res[0] = res[0];
        req->res[1] = res[1];
        complete(&req->completion);
}

int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
                  u16 res[])
{
        struct pcap_adc_sync_request sync_data;
        int ret;

        init_completion(&sync_data.completion);
        ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
                             &sync_data);
        if (ret)
                return ret;
        wait_for_completion(&sync_data.completion);
        res[0] = sync_data.res[0];
        res[1] = sync_data.res[1];

        return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);
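
/*
 * Illustrative only: a synchronous two-channel conversion from process
 * context (the channel names are placeholders; the real channel constants
 * are in <linux/mfd/ezx-pcap.h>):
 *
 *	u8 ch[2] = { PCAP_ADC_CH_FOO, PCAP_ADC_CH_BAR };
 *	u16 res[2];
 *
 *	if (!pcap_adc_sync(pcap, PCAP_ADC_BANK_1, 0, ch, res))
 *		pr_info("foo=%u bar=%u\n", res[0], res[1]);
 *
 * pcap_adc_sync() sleeps until the ADCDONE interrupt delivers the result,
 * so it must not be called from atomic context.
 */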

/* subdevs */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
        platform_device_unregister(to_platform_device(dev));
        return 0;
}

static int pcap_add_subdev(struct pcap_chip *pcap,
                           struct pcap_subdev *subdev)
{
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc(subdev->name, subdev->id);
        if (!pdev)
                return -ENOMEM;

        pdev->dev.parent = &pcap->spi->dev;
        pdev->dev.platform_data = subdev->platform_data;

        ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev);

        return ret;
}

static int ezx_pcap_remove(struct spi_device *spi)
{
        struct pcap_chip *pcap = spi_get_drvdata(spi);
        int i;

        /* remove all registered subdevs */
        device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

        /* cleanup ADC */
        mutex_lock(&pcap->adc_mutex);
        for (i = 0; i < PCAP_ADC_MAXQ; i++)
                kfree(pcap->adc_queue[i]);
        mutex_unlock(&pcap->adc_mutex);

        /* cleanup irqchip */
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
                irq_set_chip_and_handler(i, NULL, NULL);

        destroy_workqueue(pcap->workqueue);

        return 0;
}

static int ezx_pcap_probe(struct spi_device *spi)
{
        struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
        struct pcap_chip *pcap;
        int i, adc_irq;
        int ret = -ENODEV;

        /* platform data is required */
        if (!pdata)
                goto ret;

        pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
        if (!pcap) {
                ret = -ENOMEM;
                goto ret;
        }

        mutex_init(&pcap->io_mutex);
        mutex_init(&pcap->adc_mutex);
        INIT_WORK(&pcap->isr_work, pcap_isr_work);
        INIT_WORK(&pcap->msr_work, pcap_msr_work);
        spi_set_drvdata(spi, pcap);

        /* setup spi */
        spi->bits_per_word = 32;
        spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
        ret = spi_setup(spi);
        if (ret)
                goto ret;

        pcap->spi = spi;

        /* setup irq */
        pcap->irq_base = pdata->irq_base;
        pcap->workqueue = create_singlethread_workqueue("pcapd");
        if (!pcap->workqueue) {
                ret = -ENOMEM;
                dev_err(&spi->dev, "can't create pcap thread\n");
                goto ret;
        }

        /* redirect interrupts to AP, except adcdone2 */
        if (!(pdata->config & PCAP_SECOND_PORT))
                ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
                               (1 << PCAP_IRQ_ADCDONE2));

        /* setup irq chip */
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
                irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
                irq_set_chip_data(i, pcap);
                irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
        }

        /* mask/ack all PCAP interrupts */
        ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
        ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
        pcap->msr = PCAP_MASK_ALL_INTERRUPT;
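
        /*
         * The PCAP interrupt is handled as a chained interrupt:
         * pcap_irq_handler() only acks it and queues isr_work, which does
         * the (sleeping) SPI traffic from the "pcapd" workqueue.
         */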
        irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
        irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
        irq_set_irq_wake(spi->irq, 1);

        /* ADC */
        adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
                              PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

        ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
                               pcap);
        if (ret)
                goto free_irqchip;

        /* setup subdevs */
        for (i = 0; i < pdata->num_subdevs; i++) {
                ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
                if (ret)
                        goto remove_subdevs;
        }

        /* board specific quirks */
        if (pdata->init)
                pdata->init(pcap);

        return 0;

remove_subdevs:
        device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
        for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
                irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
        destroy_workqueue(pcap->workqueue);
ret:
        return ret;
}

static struct spi_driver ezxpcap_driver = {
        .probe	= ezx_pcap_probe,
        .remove = ezx_pcap_remove,
        .driver = {
                .name	= "ezx-pcap",
        },
};

static int __init ezx_pcap_init(void)
{
        return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
        spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");